% (removed: dataset-metadata artifact, not part of the LaTeX source)
\section{Introduction}
We say $\widetilde{K} \subset S^3$ is a \textit{periodic knot} if there is a $\mathbb Z_q$-action on $(S^3, \widetilde{K})$ which preserves $\widetilde{K}$ and whose fixed set is an unknot $U$ disjoint from $\widetilde{K}$. Let $\tau$ be a generator for the action. An important special case is $\tau^2=1$, in which case $\widetilde{K}$ is said to be \textit{doubly-periodic}.
\medskip
The quotient of $(S^3, \widetilde{K})$ under the group action is a second knot $(S^3, K)$ such that the map $(S^3, \widetilde{K}) \rightarrow (S^3,K)$ is a $q$-fold branched cover over $U$. The knot $K$ is said to be the $q$-fold \textit{quotient knot} of $\widetilde{K}$.
\medskip
\begin{figure}
\caption{A doubly-periodic diagram for the trefoil, and its quotient knot (an unknot) under the $\mathbb Z_2$ action.}
\subfloat{\tikzset{every path/.style={line width = 2 pt}}
\begin{tikzpicture}[scale=.5]
\draw (-4,1)--(-1,0);
\draw (1,1) -- (4,0);
\draw (-4,1) ..controls (-4, 6) and (3,6).. (3, .5);
\draw (3,.1) .. controls (3, -3.5) and (-2,-3.5)..(-2,.1);
\draw (-2,.5)..controls (-2,3) and (1,3)..(1,1);
\draw (-1,0)..controls (-1,-2) and (2,-2)..(2,.5);
\draw (2,.85)..controls (2, 4.5) and (-3,4.5)..(-3,.85);
\draw (-3,.5)..controls (-3,-5) and (4,-5)..(4,0);
\end{tikzpicture}}%
\hspace{1 cm}
\subfloat{\centering
\tikzset{every path/.style={line width = 2 pt}}
\begin{tikzpicture}[scale=.5]
\draw (-4,1)--(-1,0);
\draw(-4,1)..controls(-4,6) and (3,6)..(3,.5);
\draw (3,.5) .. controls (3, -4.5) and (-3,-4.5)..(-3,.5);
\draw (-3,.85)..controls (-3,4.5) and (2,4.5)..(2,.6);
\draw (2,.6)..controls (2,-3) and (-2,-3)..(-2,.15);
\draw (-2,.5)..controls (-2,3.1) and (.85,2.7)..(1,1);
\draw (1,1)..controls(1.3,-1.8) and (-1,-2)..(-1,0);
\end{tikzpicture}}%
\end{figure}
Link Floer homology is an invariant of a link in $S^3$ introduced by Ozsv{\'a}th and Szab{\'o} \cite{MR2443092} as a generalization of the knot Floer homology developed in 2003 by Ozsv{\'a}th and Szab{\'o} \cite{MR2065507} and independently by Rasmussen \cite{MR2704683}. The theory associates to $(S^3,L)$ a bigraded $\mathbb F_2$-vector space $\widehat{\mathit{HFL}}(S^3,L)$ which arises as the homology of the Floer chain complex of two Lagrangian tori in the symmetric product of a punctured Heegaard surface for $(S^3,L)$. The graded Euler characteristic of link Floer cohomology is the multivariable Alexander polynomial of the link multiplied by certain standard factors \cite{MR2443092}; indeed, the theory categorifies the Thurston norm of the link complement \cite{MR2393424}.
\medskip
In this paper, we will analyze the link Floer homology of a periodic (usually doubly-periodic) knot together with its axis compared to the link Floer homology of the quotient knot together with the axis. We will first construct Heegaard diagrams for $(S^3, \widetilde{K} \cup U)$ which are preserved by the action of $\mathbb Z_q$ and whose quotients under the action are Heegaard diagrams for $(S^3,K)$. These periodic Heegaard diagrams will allow us to give a simple Heegaard Floer reproof of one of Murasugi's conditions for the Alexander polynomial of a periodic knot in the case that $q=p^r$ for some prime $p$. Let $\lambda = \ell k(\widetilde{K}, U) = \ell k(K,U)$.
\begin{theorem} \label{Murasugi Theorem} \cite[Corollary 1]{MR0292060} $\Delta_{\widetilde{K}}(t) \equiv t^{\pm i}(1 + t + \cdots + t^{\lambda-1})^{q-1} (\Delta_{K}(t))^{q}$ modulo $p$.
\end{theorem}
Restricting to the case that $\widetilde{K}$ is doubly periodic, we will proceed to prove the following localization theorem. Let $\mathbb Z_2((\theta)) = \mathbb Z_2[[\theta]][\theta^{-1}]$.
\begin{theorem} \label{Link Floer Homology Spectral Sequence}
There is an integer $n_1$ less than half the number of crossings of a periodic diagram $D$ for $\widetilde{K}$ such that there is a spectral sequence whose $E^1$ page is $\left(\widehat{\mathit{HFL}}(S^3, \widetilde{K} \cup U) \otimes V^{\otimes (2n_1-1)}\right) \otimes \mathbb Z_2((\theta))$ and whose $E^{\infty}$ page is isomorphic to $\left(\widehat{\mathit{HFL}}(S^3, K \cup U) \otimes V^{\otimes (n_1-1)}\right) \otimes \mathbb Z_2((\theta))$ as $\mathbb Z_2((\theta))$-modules.
\end{theorem}
We shall see that this spectral sequence splits along the Alexander multigrading of $\widehat{\mathit{HFL}}(S^3, \widetilde{K} \cup U) \otimes V^{\otimes (2n_1-1)}$, which will have a simple relationship to the Alexander multigrading of $\widehat{\mathit{HFL}}(S^3, K \cup U)\otimes V^{\otimes (n_1-1)}$.
\medskip
We may also reduce the spectral sequence of Theorem \ref{Link Floer Homology Spectral Sequence} to contain only the information of knot Floer homology of $\widetilde{K}$ and $K$. Below, $V$ and $W$ are both two-dimensional vector spaces over $\mathbb F_2$, which will later be distinguished by their gradings.
\begin{theorem} \label{Knot Floer Homology Spectral Sequence}
There is an integer $n_1$ less than half the number of crossings on a periodic diagram $D$ for $\widetilde{K}$ such that there is a spectral sequence whose $E^1$ page is $\left(\widehat{\mathit{HFK}}(S^3, \widetilde{K}) \otimes V^{\otimes (2n_1-1)} \otimes W \right)\otimes \mathbb Z_2((\theta))$ and whose $E^{\infty}$ page is isomorphic to $\left(\widehat{\mathit{HFK}}(S^3, K) \otimes V^{\otimes (n_1-1)} \otimes W\right) \otimes \mathbb Z_2((\theta))$.
\end{theorem}
This spectral sequence splits along Alexander gradings of the theory $\widehat{\mathit{HFK}}(S^3, \widetilde{K}) \otimes V^{\otimes (2n_1-1)} \otimes W$, which are related to the Alexander gradings of $\widehat{\mathit{HFK}}(S^3, K) \otimes V^{\otimes (n_1-1)} \otimes W$ by division by two and an overall grading shift. Analysis of the behavior of this grading yields a reproof of a classical result, proven by Alan Edmonds using minimal surface theory.
\begin{corollary} \label{Genus Inequality Corollary} \cite[Theorem 4]{MR769284} Let $\widetilde{K}$ be a doubly-periodic knot in $S^3$ and $K$ be its quotient knot. Then
\[
g(\widetilde{K}) \geq 2g(K) + \frac{\lambda-1}{2}.
\]
\end{corollary}
We also observe from the spectral sequence a proof of the following corollary.
\begin{corollary} \label{Fiberedness Corollary}
Let $\widetilde{K}$ be a doubly-periodic knot in $S^3$ and $K$ its quotient knot. If Edmonds'
condition is sharp and $\widetilde{K}$ is fibered, $K$ is fibered.
\end{corollary}
\medskip
This is a weaker version of a corollary proved by Livingston and Edmonds.
\medskip
\begin{corollary}\cite[Prop. 6.1]{MR728451} Let $\widetilde{K}$ be a doubly-periodic knot in $S^3$ and $K$ its quotient knot. If $\widetilde{K}$ is fibered, $K$ is fibered.
\end{corollary}
The spectral sequences of Theorems \ref{Link Floer Homology Spectral Sequence} and \ref{Knot Floer Homology Spectral Sequence} are analogs of the spectral sequence for the knot Floer homology of double branched covers of knots in $S^3$ constructed in \cite{Hendricks}; their construction requires only slightly more technical complexity. As in that paper, the key technical tool in the proofs of Theorems \ref{Link Floer Homology Spectral Sequence} and \ref{Knot Floer Homology Spectral Sequence} is a result of Seidel and Smith concerning equivariant Floer cohomology. Let $M$ be an exact symplectic manifold, convex at infinity, containing exact Lagrangians $L_0$ and $L_1$ and equipped with an involution $\tau$ preserving $(M, L_0, L_1)$. Let $(M^{\text{inv}}, L_0^{\text{inv}}, L_1^{\text{inv}})$ be the submanifolds of each space fixed by $\tau$. Then under certain stringent conditions on the normal bundle $N(M^{\text{inv}})$ of $M^{\text{inv}}$ in $M$, there is a rank inequality between the Floer cohomology $\mathit{HF}(L_0,L_1)$ of the two Lagrangians $L_0$ and $L_1$ in $M$ and the Floer cohomology $\mathit{HF}(L_0^{\text{inv}}, L_1^{\text{inv}})$ of $L_0^{\text{inv}}$ and $L_1^{\text{inv}}$ in $M^{\text{inv}}$. More precisely, they consider the normal bundle $N(M^{\text{inv}})$ to $M^{\text{inv}}$ in $M$ and its pullback $\Upsilon(M^{\text{inv}})$ to $M^{\text{inv}} \times [0,1]$. We ask that $\Upsilon(M^{\text{inv}})$ satisfy a $K$-theoretic condition called \textit{stable normal triviality} relative to two Lagrangian subbundles over $L_0^{\text{inv}} \times \{0\}$ and $L_1^{\text{inv}} \times \{1\}$. Seidel and Smith prove the following.
\begin{theorem} \label{SeidelSmith} \cite[Section 3f]{MR2739000}
If $\Upsilon(M^{\text{inv}})$ carries a stable normal trivialization, there is a spectral sequence whose $E^1$ page is $\mathit{HF}(L_0,L_1) \otimes \mathbb Z_2((\theta))$ and whose $E^{\infty}$ page is isomorphic to $\mathit{HF}(L_0^{\text{inv}}, L_1^{\text{inv}}) \otimes \mathbb Z_2((\theta))$ as $\mathbb Z_2((\theta))$-modules.
\end{theorem}
This paper is organized as follows: In Section 2 we recall the construction and important properties of link Floer homology. In Section 3 we construct equivariant Heegaard diagrams for periodic knots, prove Theorem \ref{Murasugi Theorem} from these diagrams, and explain how Corollaries \ref{Genus Inequality Corollary} and \ref{Fiberedness Corollary} follow from Theorems \ref{Link Floer Homology Spectral Sequence} and \ref{Knot Floer Homology Spectral Sequence}. In Section 4 we review the basics of Lagrangian Floer cohomology and Seidel and Smith's localization theory. In Section 5 we check the basic symplectic geometry conditions of Seidel and Smith's theory on the symmetric products used in the computation of knot and link Floer homology, and provide a description of the homotopy type and cohomology of the most general of these symmetric products (which contains the symmetric products used to compute knot and link Floer homology as submanifolds). In Section 6 we give a proof that this general symmetric product carries a stable normal trivialization, which will imply that the symmetric products used in the computation of knot and link Floer homology do as well. In Section 7 we compute the spectral sequences of Theorems \ref{Link Floer Homology Spectral Sequence} and \ref{Knot Floer Homology Spectral Sequence} for the unknot and the trefoil as doubly-periodic knots as examples.
\subsection{Acknowledgements}
I am grateful to Robert Lipshitz for suggesting this problem, providing guidance, and reading a draft of this paper. Many thanks also to Matthew Hedden, Tye Lidman, Dylan Thurston, Allison Gilmore, Jen Hom and Ciprian Manolescu for helpful conversations, and to Adam Levine for pointing out the argument of Lemma \ref{Generator Matching Lemma}. I am also indebted to Chuck Livingston and Paul Kirk for their enthusiasm and commentary, particularly concerning the arguments leading up to Lemmas \ref{Alexander Gradings Link Lemma} and \ref{Alexander Gradings Knot Lemma}.
\section{Heegaard Floer Homology Theories} \label{Heegaard Floer Background Section}
We pause to recall the construction of link Floer homology in the three sphere, first defined by Ozsv{\'a}th and Szab{\'o} in \cite{MR2443092}. All work is done over $\mathbb F_2$.
\begin{definition}
A \textit{multipointed Heegaard diagram} $\mathcal D = (S, \boldsymbol \alpha, \boldsymbol \beta, {\bf w}, {\bf z})$ consists of the following data.
\begin{itemize}
\item An oriented surface $S$ of genus $g$.
\item Two sets of basepoints ${\bf w} = (w_1,\cdots,w_{n})$ and ${\bf z} = (z_1,\cdots,z_{n})$.
\item Two sets of closed embedded curves $\boldsymbol \alpha = \{\alpha_1,\cdots,\alpha_{g+n-1}\}$ and $\boldsymbol \beta = \{\beta_1,\cdots,\beta_{g+n-1}\}$ such that each of $\boldsymbol \alpha$ and $\boldsymbol \beta$ spans a $g$-dimensional subspace of $H_1(S)$, $\alpha_i \cap \alpha_j = \emptyset = \beta_i \cap \beta_j$ for $i\neq j$, each $\alpha_i$ and $\beta_j$ intersect transversely, and each component of $S - \cup \alpha_i$ and of $S - \cup \beta_i$ contains exactly one point of ${\bf w}$ and one point of ${\bf z}$.
\end{itemize}
\end{definition}
We use $\mathcal D$ to obtain an oriented 3-manifold $Y$ by attaching two-handles to $S \times I$ along the curves $\alpha_i \times \{0\}$ and $\beta_i \times \{1\}$ and filling in $2n$ three-balls to close the resulting manifold. This yields a handlebody decomposition $Y = H_{\boldsymbol \alpha} \cup_{S} H_{\boldsymbol \beta}$ of $Y$. For this paper we will in fact restrict to the case that this produces the three-sphere $S^3$. The Heegaard diagram $\mathcal D$ furthermore determines a knot or link in $Y$: connect the $z$ basepoints to the $w$ basepoints in the complement of the curves $\alpha_i$, push these arcs into the handlebody $H_{\boldsymbol \alpha}$, then connect the $w$ basepoints to the $z$ basepoints in the complement of the curves $\beta_i$ and push these arcs into the $H_{\boldsymbol \beta}$ handlebody.
\medskip
We insist on numbering our basepoints such that if $L = K_1 \cup \cdots \cup K_{\ell}$ is the link thus produced, there are $n_j$ pairs of basepoints on $K_j$, and there are integers $0 = k_0<k_1<\cdots<k_{\ell} =n$ with $k_j - k_{j-1} = n_j$ such that $w_{k_{j-1}+1},\cdots,w_{k_j}, z_{k_{j-1}+1},\cdots,z_{k_{j}}$ are the basepoints on $K_j$. (In the examples we are actually interested in, we will have $L = K_1 \cup K_2$ with $n_1$ pairs of basepoints on $K_1$ and a single pair of basepoints on $K_2$, so the notation will not be too bad.)
\medskip
There is an important collection of two-chains on the surface $S$ on which we shall impose one more technical condition.
\begin{definition}
A \textit{periodic domain} is a 2-chain $P$ on $S \backslash \{{\bf w}\}$ whose boundary may be expressed as a linear combination of the $\alpha$ and $\beta$ curves.
\end{definition}
Note that this definition agrees with the convention of \cite[Definition 3.4]{MR2443092}, in which the set of periodic domains is the set of 2-chains with boundary a linear combination of $\alpha$ and $\beta$ curves which contain the components of $S - {\boldsymbol \alpha} - {\boldsymbol \beta}$ containing a basepoint algebraically zero times. The set of periodic domains on $S$ is in bijection with $\mathbb Z^{b_2(Y) + n-1} = H_2(Y \# (S^1 \times S^2)^{\#(n-1)})$. If we additionally puncture the surface $S$ by removing the basepoints $\{{\bf z}\}$, the remaining periodic domains are in bijection with $H_2(Y - L) \cong \mathbb Z^{b_2(Y) + {\ell}-1}$, where $\ell$ is the number of components of the link. We say that $\mathcal D$ is \textit{weakly admissible} if every periodic domain on $S$ has both positive and negative local multiplicities, and require that any Heegaard diagram we use to compute link Floer homology have this property.
\medskip
The construction of the link Floer homology $\widehat{\mathit{HFL}}(Y,L)$ uses the symmetric products $\text{Sym}^{g+n-1}(S)$ consisting of all unordered $(g+n-1)$-tuples of points in $S$. This space is the quotient of $(S)^{g+n-1}$ by the action of the symmetric group $S_{g+n-1}$ permuting the factors of $(S)^{g+n -1}$, and its holomorphic structure is defined by insisting that the quotient map $(S)^{g+n-1} \rightarrow \text{Sym}^{g+n-1}(S)$ be holomorphic. In particular, if $j$ is a complex structure on $S$, there is a natural complex structure $\text{Sym}^{g+n -1}(j)$ on the symmetric product. There are two transversely intersecting submanifolds of $\text{Sym}^{g+n -1}(S)$ of especial interest, namely the two totally real embedded tori $\mathbb T_{\boldsymbol \alpha} = \alpha_1 \times\cdots \times \alpha_{g+n -1}$ and $\mathbb T_{\boldsymbol \beta} = \beta_1 \times\cdots\times \beta_{g+n-1}$. The chain complex $\widehat{\mathit{CFL}}(\mathcal D)$ for link Floer homology is generated by the finite set of intersection points of $\mathbb T_{\boldsymbol \alpha}$ and $\mathbb T_{\boldsymbol \beta}$. More concretely, a generator of $\widehat{\mathit{CFL}}(\mathcal D)$ is a point ${\bf x} = (x_1, \cdots, x_{g+n -1}) \in \text{Sym}^{g+n-1}(S)$ such that each $\alpha$ or $\beta$ curve contains a single $x_i$.
\medskip
In its original form, link Floer homology is computed as follows: let ${\bf x, y}$ be two intersection points in $\widehat{\mathit{CFL}}(\mathcal D)$. Denote by $\pi_2(\bf{x,y})$ the set of Whitney disks $\phi:B_1(0) \rightarrow \text{Sym}^{g+n-1}(S)$ from the unit disk in the complex plane to our symmetric product such that $\phi(-i) = {\bf x}$, $\phi(i) = {\bf y}$ and $\phi$ maps the portion of the boundary of the unit disk with positive real part into $\mathbb T_{\boldsymbol \alpha}$ and the portion with negative real part into $\mathbb T_{\boldsymbol \beta}$. The most common method of studying such maps $\phi$ is to use the following familiar construction of Ozsv{\'a}th and Szab{\'o} to associate to any homotopy class of Whitney disks in $\pi_2({\bf x},{\bf y})$ a domain in $S$. There is a $(g+n-1)$-fold branched cover
\[
S \times \text{Sym}^{g+n -2}(S) \rightarrow \text{Sym}^{g+n-1}(S )
\]
The pullback of this branched cover along $\phi$ is a $(g+n -1)$-fold branched cover of $B_1(0)$ which we shall denote $\Sigma(B_1(0))$. Consider the induced map on $\Sigma(B_1(0))$ formed by projecting the total space of this fibration to $S$.
$$
\xymatrix{
\Sigma(B_1(0)) \ar[r] \ar[d] & S \times \text{Sym}^{g+n-2}(S) \ar[r] \ar[d] & S \\
B_1(0) \ar[r]^-{\phi} & \text{Sym}^{g+n-1}(S)
}
$$
We associate to $\phi$ the image of this projection counted with multiplicities; to wit, we let $D = \Sigma a_i D_i$ where $D_i$ are the closures of the components of $S - \cup \alpha_i - \cup \beta_i$ and $a_i$ is the algebraic multiplicity of the intersection of the holomorphic submanifold $V_{x_i} = \{x_i\} \times \text{Sym}^{g+n-2}(S)$ with $\phi(B_1(0))$ for any interior point $x_i$ of $D_i$. The boundary of $D$ consists of $\alpha$ arcs from points of ${\bf x}$ to points of ${\bf y}$ and $\beta$ arcs from points of ${\bf y}$ to points of ${\bf x}$. If $D_i$ contains a basepoint $z_j$, then we introduce some additional notation by letting $a_i = n_{z_j}(\phi)$ be the algebraic intersection number of $\{z_j\} \times \text{Sym}^{g+n-2}(S)$ with the image of $\phi$.
\medskip
Given $\phi \in \pi_2({\bf x}, {\bf x})$, we define the \textit{Maslov index} as follows. Recall that $\phi: D^2 \rightarrow \text{Sym}^{g+n-1}(S)$ maps the portion of the boundary of the unit disk $D^2$ in the right half of the complex plane $\mathbb C = \{u+iv: u,v \in \mathbb R\}$ to a loop in $\mathbb T_{\boldsymbol \alpha}$ and the portion of the boundary in the left half to $\mathbb T_{\boldsymbol \beta}$. Choose a constant trivialization of the orientable real vector bundle $\phi^*(T(\mathbb T_{\boldsymbol \alpha}))$ over $\partial D|_{v\geq 0}$. We may tensor this real trivialization with $\mathbb C$ and extend to a complex trivialization of $\phi^*(T(\text{Sym}^{g+n-1}(S)))$ by pushing across the disk linearly. Relative to this trivialization, the real bundle $\phi^*(T(\mathbb T_{\boldsymbol \beta}))$ over $\partial D|_{v \leq 0}$ induces a loop of real subspaces of $\mathbb C^{g+n-1} = \phi^*(T\text{Sym}^{g+n-1}(S))$. The winding number of this loop is the Maslov index of the map $\phi$. Notice that we could also have used $\phi^*(J(T(\mathbb T_{\boldsymbol \alpha})))$ and $\phi^*(T(\mathbb T_{\boldsymbol \beta}))$, where $J$ is the complex structure on the vector bundle $T\text{Sym}^{g+n-1}(S)$, and obtained the same number.
\medskip
The Maslov index $\mu(\phi)$ can equivalently be computed using the associated domain $\Sigma a_i D_i$ in a formula of Lipshitz's \cite[Proposition 4.2]{MR2240908}. For each domain $D_i$, let $e(D_i)$ be the Euler measure of $D_i$. In particular, if $D_i$ has $2k$ corners, $e(D_i) = 1- \frac{k}{2}$. Let $p_{{\bf x}}(D)$ be the sum of the average of the multiplicities of $D$ at the four corners of each point in ${\bf x}$ and likewise for $p_{{\bf y}}(D)$. Then the Maslov index is
\begin{equation} \label{Maslov index formula}
\mu(\phi) = \sum a_i e(D_i) + p_{{\bf x}}(D) + p_{{\bf y}}(D).
\end{equation}
In the case that $[\phi] \in \pi_2({\bf x}, {\bf x})$ is a domain from ${\bf x}$ to itself, and therefore a periodic domain, we have the following alternate interpretation of the Maslov index. Because $\phi(i) = \phi(-i)$, we see that $\phi$ sends $\partial D|_{v \geq 0}$ to a loop in $\mathbb T_{\boldsymbol \alpha}$ and $\partial D|_{v \leq 0}$ to a loop in $\mathbb T_{\boldsymbol \beta}$. Therefore we may replace $\phi$ by a map $\widehat{\phi}: S^1 \times I \rightarrow \text{Sym}^{g+n-1}(S)$ which maps $S^1 \times \{1\}$ to $\mathbb T_{\boldsymbol \alpha}$ and $S^1 \times \{0\}$ to $\mathbb T_{\boldsymbol \beta}$. We then consider the complex pullback bundle $E = \widehat{\phi}^*(T(\text{Sym}^{g+n-1}(S)))$ to $S^1 \times I$ and the totally real subbundles $\widehat{\phi}|_{S^1 \times \{1\}}^{*} (T(\mathbb T_{\boldsymbol \alpha}))$ of $E|_{S^1 \times \{1\}}$ and $\widehat{\phi}|_{S^1 \times \{0\}}^{*} (T(\mathbb T_{\boldsymbol \beta}))$ of $E|_{S^1 \times \{0\}}$. The Maslov index is still calculated by trivializing $\widehat{\phi}|_{S^1 \times \{1\}}^{*} (T(\mathbb T_{\boldsymbol \alpha}))$, complexifying, and computing the winding number of the loop of real-half dimensional subspaces in $\mathbb C^{g+n-1}$ represented by $\widehat{\phi}|_{S^1 \times \{0\}}^{*} (T(\mathbb T_{\boldsymbol \beta}))$ with respect to the trivialization. This number classifies the bundle in the following way: complex vector bundles over the annulus whose restriction to the boundary of the annulus carries a canonical real subbundle are in bijection with maps $[(S^1 \times I, \partial(S^1 \times I)), (BU,BO)] = \langle (S^1 \times I, \partial(S^1 \times I)), (BU,BO) \rangle \cong \mathbb Z$, where the map to $\mathbb Z$ is the Maslov index $\mu(\phi)=\mu(\widehat{\phi})$ \cite[Theorem C.3.7]{MR2045629}.
\medskip
Now let us look at the bundle $E$ over $S^1 \times I$ and its real subbundles over the boundary components of $S^1 \times I$ from a slightly different perspective. The real bundles $\widehat{\phi}|_{S^1 \times \{1\}}^{*} (T(\mathbb T_{\boldsymbol \alpha}))$ and $\widehat{\phi}|_{S^1 \times \{0\}}^{*} (T(\mathbb T_{\boldsymbol \beta}))$ are orientable, hence trivializable over the circle, so we may choose real trivializations and tensor with $\mathbb C$ to obtain a complex trivialization of $E|_{\partial(S^1 \times I)}$. We can now regard $E$ as a relative vector bundle $E_{\text{rel}}$ over $(S^1 \times I, \partial(S^1 \times I))$, and consider its relative first Chern class $c_1(E_{\text{rel}})$. Equivalently, we may use this trivialization to construct a vector bundle $\widetilde{E}$ over $(S^1 \times I)/ \partial(S^1 \times I) \cong S^2$ such that the pullback $q^*(\widetilde{E})$ along the quotient map is $E$. Then $c_1(\widetilde{E})$ is the relative first Chern class $c_1(E_{\text{rel}})$ under the identification $H^2(S^2) \simeq H^2(S^1\times I, \partial(S^1 \times I))$. Moreover, isomorphism classes of vector bundles over $S^2$ are in bijection with homotopy classes of maps $[S^2, BU] \cong \langle S^2, BU \rangle = \pi_2(BU) \cong \mathbb Z$, where the identification with $\mathbb Z$ is via the first Chern class. Using the homotopy long exact sequence of the pair $(BU,BO)$, we observe the following relationship between $\mu$ and $c_1$.
\[
\xymatrix{
\pi_2(BU) \ar[r] \ar[d]^{c_1}& \pi_2(BU, BO) \ar[r] \ar[d]^{\mu} & \pi_1(BO) \ar[d] \\
\mathbb Z \ar[r]^-{\times 2} & \mathbb Z \ar[r] & \mathbb Z_2
}
\]
Therefore the Maslov index $\mu(\phi)$ is twice the relative first Chern class $c_1(E_{\text{rel}})$.
\medskip
The differential $\partial$ on $\widehat{\mathit{CFL}}(\mathcal D)$ counts points in the moduli spaces of pseudo-holomorphic curves of Maslov index one in $\pi_2(\bf{x, y})$, modulo the translation action of $\mathbb R$.
\begin{align*}
\partial({\bf x}) = \sum_{{\bf y} \in \mathbb T_{\boldsymbol \alpha} \cap \mathbb T_{\boldsymbol \beta}} \sum_{\substack{\phi \in \pi_2({\bf x, y}) : \\
\mu(\phi) =1 \\
n_{w_i}(\phi) = 0 \\
n_{z_j}(\phi) = 0
}}
\#\left(\frac {M(\phi)}{\mathbb R}\right) {\bf y}
\end{align*}
\noindent Ozsv{\'a}th and Szab{\'o} have shown \cite{MR2065507} that this is a well-defined differential. Indeed, once we show that the homology of $\widehat{\mathit{CFL}}(\mathcal D)$ with respect to $\partial$ can be seen as a Floer cohomology theory, this will be a special case of the well-definedness of the differential of Definition \ref{FloerCohomologyDefn}.
\medskip
The complex $\widehat{\mathit{CFL}}(\mathcal D)$ carries a (relative, for our purposes) homological grading called the Maslov grading $M({\bf x})$ which takes values in $\mathbb Z$. Suppose ${\bf x}$ and ${\bf y}$ are connected by a Whitney disk $\phi$. Then the relative Maslov grading is determined by
\begin{align*}
M({\bf x}) - M({\bf y}) &= \mu(\phi) - 2\sum_i n_{w_i}(\phi).
\end{align*}
The complex also carries an additional Alexander multigrading ${\bf A} = (A_1,\cdots,A_{\ell})$. This multigrading takes values in an affine lattice $\mathbb H$ over $H_1(S^3-L; \mathbb Z)\cong H_1(L)$. Recall that $H_1(S^3-L; \mathbb Z) \cong \mathbb Z^{\ell}$ generated by the homology classes of meridians $\mu_j$ of the component knots $K_j$ of $L$. Define the lattice $\mathbb H$ to consist of elements
\begin{align*}
\sum_{i=1}^{\ell} a_i[\mu_i]
\end{align*}
\noindent where $a_i \in \mathbb Q$ satisfies the property that $2a_i + \ell k(K_i, L-K_i)$ is an even integer. To determine the relative Alexander multigrading, recall that the basepoints $w_{k_{j-1} +1},\cdots,w_{k_j},z_{k_{j-1} +1},\cdots,z_{k_j}$ lie on $K_j$ (with the convention that $k_0 = 0$). Then once again if $\phi$ is a Whitney disk connecting ${\bf x}$ and ${\bf y}$,
\begin{align*}
A_j({\bf x}) - A_j({\bf y}) &= \sum_{i=k_{j-1} +1}^{k_j} n_{z_i}(\phi) - \sum_{i=k_{j-1}+1}^{k_j} n_{w_i}(\phi).
\end{align*}
We can also see the relative Alexander multigrading geometrically. Given ${\bf x},{\bf y} \in \mathbb T_{\boldsymbol \alpha} \cap \mathbb T_{\boldsymbol \beta}$, we find paths
\begin{align*}
a:[0,1]\rightarrow \mathbb T_{\boldsymbol \alpha} \text{ and } b:[0,1]\rightarrow \mathbb T_{\boldsymbol \beta}
\end{align*}
\noindent such that $\partial a = \partial b = {\bf x} - {\bf y}$. (For example, $a \cup b$ may be the boundary of a Whitney disk $\phi$ from ${\bf y}$ to ${\bf x}$.) View these paths as one-chains on $S \backslash \{{\bf w, z}\}$. Since attaching one- and two-handles to the $\alpha$ and $\beta$ curves on $S$ and filling in three-balls at the basepoints yields $Y$, we obtain a trivial one-cycle in $Y$. Indeed, a domain $D$ on $\mathcal D$ is the shadow of a Whitney disk if and only if its boundary, viewed as a cycle on $S$, descends to a trivial cycle on $Y$. However, if we attach $\alpha$ and $\beta$ circles to $S \backslash \{{\bf w,z}\}$ (and no three-balls) we obtain the manifold $Y-L$ and a one-cycle $\epsilon({\bf x}, {\bf y})$ in $Y-L$.
\begin{align*}
\underline{\epsilon}_{{\bf w,z}}: (\mathbb T_{\boldsymbol \alpha} \cap \mathbb T_{\boldsymbol \beta}) \times (\mathbb T_{\boldsymbol \alpha} \cap \mathbb T_{\boldsymbol \beta}) \rightarrow H_1(Y-L; \mathbb Z)
\end{align*}
We obtain the following lemma (which has only been very slightly adjusted from the original to account for the possibility of multiple pairs of basepoints on a link component).
\begin{lemma} \cite[Lemma 3.10]{MR2443092} An oriented $\ell$-component link $L$ in $Y$ induces a map
\begin{align*}
\prod\colon H_1(Y-L) \rightarrow \mathbb Z^{\ell}
\end{align*}
where $\prod_i(\gamma)$ is the linking number of $\gamma$ with the $i$th component $K_i$ of $L$. In particular, for ${\bf x},{\bf y} \in \mathbb T_{\boldsymbol \alpha}\cap \mathbb T_{\boldsymbol \beta}$ and $\phi \in \pi_2({\bf x}, {\bf y})$, we have
\begin{align*}
\prod_i(\underline{\epsilon}_{{\bf w,z}}({\bf x},{\bf y})) = \sum_{n_{i-1}+1}^{n_i} n_{z_j}(\phi) - \sum_{n_{i-1}+1}^{n_i} n_{w_j}(\phi).
\end{align*}
\end{lemma}
\begin{proof}
The proof is nearly identical to the original: $\phi$ induces a nullhomology of $\underline{\epsilon}({\bf x}, {\bf y})$, which meets the $i$th component $K_i$ of $L$ with intersection number $\sum_{n_{i-1}+1}^{n_i} n_{z_j}(\phi) - \sum_{n_{i-1}+1}^{n_i} n_{w_j}(\phi)$.
\end{proof}
\noindent Formulas for the absolute Maslov grading may be found in \cite[Theorem 3.3]{MR2249248}, and the absolute Alexander multigrading is laid out in \cite[Section 4.2 and Section 8.1]{MR2443092}, but neither will be needed here.
\medskip
The differential $\partial$ lowers the Maslov grading by one and preserves the Alexander multigrading. Therefore $\widehat{\mathit{CFL}}(\mathcal D)$ also splits along Alexander grading.
\medskip
The homology of $\widehat{\mathit{CFL}}(\mathcal D)$ with respect to the differential $\partial$ is very nearly the link Floer homology of $(Y,L)$. There is, however, a slight subtlety having to do with the number of pairs of basepoints $z_i$ and $w_i$ on $\mathcal D$. Let $V_j$ be a vector space over $\mathbb F_2$ with generators in gradings $(M,(A_1,\cdots,A_{\ell})) = (0,(0,\cdots,0))$ and $(M,(A_1,\cdots,A_j,\cdots,A_{\ell})) = (-1,(0,\cdots,-1,\cdots,0))$, with the $-1$ in the $j$th component. As before, let $K_j$ carry $n_j$ pairs of basepoints.
\begin{definition}
The homology of the complex $\widehat{\mathit{CFL}}(\mathcal D)$ with respect to the differential $\partial$ is
\[
\widetilde{\mathit{HFL}}(\mathcal D) = \widehat{\mathit{HFL}}(S^3,L) \otimes V_1^{\otimes (n_1-1)}\otimes\cdots\otimes V_{\ell}^{\otimes (n_{\ell}-1)}.
\]
\end{definition}
The theory $\widehat{\mathit{HFL}}(S^3,L)$ is symmetric with respect to the Alexander multigrading as follows. Let $\widehat{\mathit{HFL}}_d(S^3,L, (A_1,...,A_j))$ be the summand of the link Floer homology of $L$ in Alexander multigrading $(A_1,...,A_j)$ and Maslov grading $d$.
\begin{proposition} \cite[Proposition 8.2]{MR2443092} There is an isomorphism
\[
\widehat{\mathit{HFL}}_d(S^3, L, (A_1,...,A_j)) \cong \widehat{\mathit{HFL}}_{d-\sum A_i}(S^3, L, (-A_1,...,-A_j)).
\]
\end{proposition}
In particular, ignoring Maslov gradings, we see that the link Floer homology is symmetric in each of its Alexander gradings.
\medskip
Before moving on, let us consider one additional differential on the complex $\widehat{\mathit{CFL}}(\mathcal D)$. Suppose that in addition to the disks we counted previously, we also include disks passing over $z$ basepoints on the component $K_j$ of $L$. In other words, consider the differential $\partial_{K_j}$ defined as follows.
\begin{align*}
\partial_{K_j}({\bf x}) = \sum_{{\bf y} \in \mathbb T_{\boldsymbol \alpha} \cap \mathbb T_{\boldsymbol \beta}} \sum_{\substack{\phi \in \pi_2({\bf x, y}) : \\
\mu(\phi) =1 \\
n_{w_i}(\phi) = 0 \\
n_{z_i}(\phi) = 0 \text{ if } z_i \notin K_j
}}
\#\left(\frac {M(\phi)}{\mathbb R}\right) {\bf y}
\end{align*}
This has the effect of discounting the contribution of the component $K_j$ to the link Floer homology, but of maintaining the effect of an extra $n_j$ pairs of basepoints on the Heegaard surface. Ergo we have the following proposition. Let $W$ be a two-dimensional vector space over $\mathbb F_2$ with summands in gradings $(M, (A_1,\cdots,\widehat{A_j},\cdots,A_{\ell})) = (0, (0,...,0))$ and $(M, (A_1,...\widehat{A_j},...A_{\ell})) = (-1, (0,...,0))$.
\begin{proposition} \label{Ignore Component Proposition} \cite[Proposition 7.2]{MR2443092} The homology of the complex $\widehat{\mathit{CFL}}(\mathcal D)$ with respect to the differential $\partial_{K_j}$ is isomorphic to $\widehat{\mathit{HFL}}(S^3, L-K_j) \otimes V_1^{\otimes (n_1-1)} \otimes\cdots\otimes W^{\otimes n_j} \otimes\cdots\otimes V_{\ell}^{\otimes (n_{\ell} -1)}$.
\end{proposition}
We may think of Proposition \ref{Ignore Component Proposition} as the assertion that there is a spectral sequence from the $\mathbb Z^{\ell}$ graded theory $\widehat{\mathit{HFL}}(S^3, L)$ to the $\mathbb Z^{\ell-1}$ graded theory $\widehat{\mathit{HFL}}(S^3, L-K_j)$ by computing all differentials that change the $j$th entry of the $\mathbb Z^{\ell}$ multigrading. This spectral sequence comes with an overall shift in relative Alexander gradings, which is computed by considering fillings of relative $\text{spin}^{\text{c}}$ structures on $S^3-L$ to relative $\text{spin}^{\text{c}}$ structures on $S^3 - (L- K_j)$. The generally slightly complicated formula admits a simple expression in the case of two-component links, which is the only case of interest to this paper.
\begin{lemma} \label{Gradings Shift Lemma} \cite[Lemma 3.13]{MR2443092} Let $L = K_1 \cup K_2$, and $\lambda= \ell k(K_1,K_2)$. Suppose $\mathcal D$ is a Heegaard diagram for $(S^3,L)$, and ${\bf x} \in \widetilde{\mathit{CFL}}(\mathcal D)$. If $(A_1({\bf x}), A_2({\bf x})) = (i,j)$ in the complex $\widetilde{\mathit{CFL}}(\mathcal D)$ with differential $\partial$, then in the complex $\widetilde{\mathit{CFL}}(\mathcal D)$ with differential $\partial_{K_2}$, the Alexander grading of ${\bf x}$ is $A_1({\bf x}) = i - \frac{\lambda}{2}$.
\end{lemma}
That is, forgetting one component of a two-component link has the effect of shifting Alexander gradings of the other component downward by $\frac{\lambda}{2}$. The proof comes from an analysis of filling relative $\text{spin}^{\text{c}}$ structures; the effect of extending a relative $\text{spin}^{\text{c}}$ structure $\mathfrak s$ on $S^3-\nu L$ to $S^3 - \nu(L-K_j)$ is to shift the Chern class $c_1(\mathfrak s)$ by the Poincar\'{e} dual of the homology class of $K_j$ in $S^3 - \nu(L-K_j)$. For a two-component link this is a shift by the linking number.
\subsection{Link Floer homology, the multivariable Alexander polynomial, and the Thurston norm}
Recall that the \textit{multivariable Alexander polynomial} of an oriented link $L = K_1 \cup \cdots \cup K_{\ell}$ is a polynomial invariant $\Delta_L(t_1,\cdots,t_{\ell})$ with one variable for each component of the link. While its relationship to the Alexander polynomials of the component knots is in general slightly complicated, in the case of a two-component $L = K_1 \cup K_2$ Murasugi proved the following using Fox calculus.
\begin{lemma} \cite[Proposition 4.1]{MR0292060} \label{Two Component Link Lemma}
Let $L=K_1\cup K_2$ be an oriented two-component link with $\ell k(K_1, K_2) = \lambda$. If $\Delta_L(t_1,t_2)$ is the multivariable Alexander polynomial of $L$ and $\Delta_{K_1}(t_1)$ is the ordinary Alexander polynomial of $K_1$, then
\begin{align*}
\Delta_L(t_1, 1) = (1+t_1+t_1^2+ \cdots +t_1^{\lambda-1}) \Delta_{K_1}(t_1)
\end{align*}
\end{lemma}
The Euler characteristic of link Floer homology encodes the multivariable Alexander polynomial of the link as follows.
\begin{proposition} \cite[Theorem 1.3]{MR2443092} If $L$ is an oriented link,
\begin{align*}
\sum_{[{\bf x}] \in \widehat{\mathit{HFL}}(S^3,L)} (-1)^{M({\bf x})} t_1^{A_1({\bf x})}\cdots t_{\ell}^{A_{\ell}({\bf x})} = \begin{cases}
\left(\prod_{i=1}^{\ell} (t_i^{\frac{1}{2}} - t_i^{-\frac{1}{2}})\right) \Delta_L(t_1,\cdots , t_{\ell}) & \ell>1 \\
\Delta_L(t_1) & \ell=1
\end{cases}
\end{align*}
\end{proposition}
Link Floer homology also categorifies the Thurston seminorm of the link complement. Let us recall the definition of the Thurston seminorm on a three manifold with boundary.
\begin{definition}[]
Let $\gamma \in H^2(M, \partial M)$. The Thurston seminorm $x(\gamma)$ is
\begin{align*}
x(\gamma) = \min\{ -\chi(S) \}
\end{align*}
\noindent where $S$ is any embedded surface in $(M, \partial M)$ with $[S] = \gamma$.
\end{definition}
An important special case occurs when $M$ is the complement of a link $L = K_1 \cup \cdots \cup K_{\ell}$, that is, $M = S^3 - \nu K_1 -\cdots-\nu K_{\ell}$. Then $H^2(M, \partial M) \cong H_1(L)$, and computing the Thurston seminorm of the element of $H^2(M, \partial M)$ corresponding to $[\sum a_iK_i] \in H_1(L)$ is a matter of computing the minimal Euler characteristic of an embedded surface $F$ whose intersection with a meridian $\mu_i$ of $K_i$ is $a_i$ for each $i$. In particular, $x([K_i])$ is the minimal Euler characteristic of surface $F$ with boundary one longitude of $K_i$ and an arbitrary number of meridians of the components of $L$. (For practical purposes, one may consider taking a Seifert surface $F$ for $K_i$ and puncturing $F$ wherever it intersects some other component of $L$. However, take note that puncturing a minimal Seifert surface for $K_i$ does not necessarily result in a Thurston-norm minimizing surface.) When $L$ is a knot, this determines the minimal Euler characteristic of a Seifert surface for the knot, and thus determines the genus of the knot. Because $H_1(L) \cong H^1(S^3 - \nu L)$, we commonly refer to the element of $H^2(M, \partial M)$ which spans $K_i$ as the dual to the homology class of meridian $\mu_i$ of $K_i$.
\medskip
Thurston showed \cite{MR823443} that the Thurston seminorm extends to an $\mathbb R$-valued function of $H_2(S^3 - \nu(L)) \cong H_2(S^3,L)$:
\[
x_L: H_2(S^3, L; \mathbb R) \rightarrow \mathbb R.
\]
Link Floer homology yields a related function. Recall that $\mathbb H \subset H^2(S^3,L; \mathbb R)\cong H_1(L; \mathbb R)$ is the affine lattice of real second cohomology classes $h=\sum A_i [\mu_i]$ for which $\widehat{\mathit{HFL}}(S^3,L, {\bf A})$ is defined. We have
\begin{align*}
y: H^1(S^3 - L; \mathbb R) \rightarrow \mathbb R
\end{align*}
\noindent which is defined by
\[
y(\gamma) = \max_{\{\sum A_i [\mu_i] \in \mathbb H \subset H_1(L; \mathbb R): \widehat{\mathit{HFL}}(L,{\bf A}) \neq 0\}} |\langle \sum A_i\widehat{[\mu_i]},\gamma \rangle|.
\]
The categorification considers the case of links with no \textit{trivial components}, that is, unknotted components unlinked with the rest of the link.
\begin{proposition} \cite[Thm 1.1]{MR2393424} \label{Statement of Categorification} Let $L$ be an oriented link with no trivial components. Given $\gamma \in H^1(S^3 - L; \mathbb R)$, the link Floer homology groups determine the Thurston norm of $L$ via the relationship
\begin{align*}
x_L(\text{PD}[\gamma]) + \sum_{i=1}^{\ell} | \langle \gamma, \mu_i \rangle | = 2y(\gamma).
\end{align*}
Here $\mu_i$ is the homology class of the meridian for the $i$th component of $L$ in $H_1(S^3 -L; \mathbb R)$, and therefore $| \langle h, \mu_i \rangle |$ is the absolute value of the Kronecker pairing of $h$ with $\mu_i$.
\end{proposition}
We will primarily evaluate this equality on the dual classes to the meridians $\mu_i$ themselves. Through a slight but traditional abuse of notation, we will sometimes say $x_L([K_i])$ for the Thurston norm of the dual to $\mu_i$. In the case that $K$ is a knot, so that $x_K([K]) = 2g(K) - 1$, Proposition \ref{Statement of Categorification} reduces to the familiar theorem of \cite[Theorem 1.2]{MR2023281} that the top Alexander grading $i$ for which $\widehat{\mathit{HFK}}(S^3,K,i)$ is nontrivial is the genus of the knot. In general, observe that if we evaluate on $\mu_i$, we obtain $x_L([K_i]) + 1 = 2y(\mu_i)$. In other words, the total breadth of the $A_i$ Alexander grading in the link Floer homology is the Thurston norm of the dual to $K_i$ plus one.
\medskip
Before leaving the realm of link Floer homology background, we will require one further result concerning the knot Floer homology of fibred knots.
\begin{proposition}\cite[Thm 1.1]{MR2357503}, \cite[Thm 1.4]{MR2450204} \label{Fiberedness Condition Lemma}
Let $K$ be a knot, and $g(K)$ its genus. Then $K$ is fibred if and only if $\widehat{\mathit{HFK}}(S^3, K, g(K)) = \mathbb Z$.
\end{proposition}
The forward direction (that if $K$ is fibred, then the knot Floer homology in the top nontrivial Alexander grading is $\mathbb Z$) is due to Ozsv{\'a}th and Szab{\'o} \cite[Theorem 1.1]{MR2153455}, whereas the other direction was proved by Ghiggini \cite{MR2450204} in the case $g=1$ and Ni \cite{MR2357503} in the general case.
\medskip
We are now ready to consider the specific case of periodic knots.
\section{Proofs of Murasugi's and Edmonds' Conditions} \label{Periodic Knots Section}
Let $\widetilde{K}\subset S^3$ be an oriented $q$-periodic knot and $K$ its quotient knot. We will begin by constructing a Heegaard diagram for $(S^3, \widetilde{K} \cup U)$ which is preserved by the action of $\mathbb Z_q$ on $(S^3, \widetilde{K} \cup U)$ and whose quotient under this action is a Heegaard diagram for $(S^3,K\cup U)$.
\subsection{Heegaard diagrams for periodic knots}
As in \cite{Hendricks}, it will be necessary to work with Heegaard diagrams for $(S^3, K \cup U)$ on the sphere $S^2$. Regard $S^3$ as $\mathbb R^3 \cup \{\infty\}$ and arrange $\widetilde{K}$ such that the unknotted axis of periodicity $U$ is the $z$-axis together with the point at infinity. Then the projection of $\widetilde{K}$ to the $xy$-plane together with the point at infinity is a periodic diagram $\widetilde{E}$ for $\widetilde{K}$. Taking the quotient of $(S^3, \widetilde{K})$ by the action of $\mathbb Z_q$ and similarly projecting to the $xy$-plane together with the point at infinity produces a quotient diagram $E$ for $K$.
\medskip
Construct a Heegaard diagram for $K \cup U$ as follows: Begin with the diagram $E$ on $S^2 = \mathbb R^2 \cup \{\infty\}$. Place a basepoint $w_0$ at $\infty$ and $z_0$ at $0$; these will be the sole basepoints on $U$. (This is a slight departure from the notation of Section \ref{Heegaard Floer Background Section}; it will be more convenient to have the indexing start at $w_0$ rather than $w_1$ for the diagrams we construct.) Arrange basepoints $z_1,w_1,\cdots ,z_{n_1},w_{n_1}$ on $K$ such that traversing $K$ in the chosen orientation, one passes through the basepoints in that order. Moreover, we insist that while travelling from $z_i$ to $w_i$ one passes only through undercrossings and travelling from $w_i$ to $z_{i+1}$ or from $w_n$ to $z_1$ one passes only through overcrossings. In other words, we choose basepoints so as to make $E$ into a bridge diagram for $K$. Notice that $n_1$ is at most the number of crossings on the diagram $E$, or half the number of crossings on $\widetilde{E}$. Encircle the portion of the knot running from $z_i$ to $w_i$ with a curve $\alpha_i$, oriented counterclockwise in the complement of $w_0$. Similarly, encircle the portion of the knot running from $w_{i}$ to $z_{i+1}$ (or from $w_{n_1}$ to $z_1$) with a curve $\beta_i$, oriented counterclockwise in the complement of $w_0$. Notice that both $\alpha_i$ and $\beta_i$ run counterclockwise around $w_i$, and moreover for each $i$, $S^2 \backslash \{\alpha_i, \beta_i\}$ has four components: one each containing $z_{i}, w_i$, and $z_{i+1}$, and one containing all other basepoints. This yields a Heegaard diagram $\mathcal D = (S^2, \boldsymbol{\alpha}, \boldsymbol{\beta}, {\bf w}, {\bf z})$ for $(S^3, K \cup U)$.
\medskip
We may now take the branched double cover of $\mathcal D$ over $z_0$ and $w_0$ to produce a Heegaard diagram $\widetilde{\mathcal D}$ for $(S^3, \widetilde{K} \cup U)$ compatible with $\widetilde{E}$. This diagram has basepoints $w_0$ and $z_0$ for $U$ and basepoints $z_1^1, w_1^1,\cdots,z_{n_1}^1,w_{n_1}^1, z_1^2,\cdots,w_{n_1}^2,\cdots,z_1^q,\cdots,w_{n_1}^q$ arranged in that order along the oriented knot $\widetilde{K}$. Each adjacent pair $z_i^a$ and $w_i^a$ is encircled by $\alpha_i^a$ a lift of $\alpha_i$, and each adjacent pair $w_{i}^a$ and $z_{i+1}^a$ is encircled by $\beta_i^a$ a lift of $\beta_i$. (Pairs $w_{n_1}^a$ and $z_1^{a+1}$, as well as $w_{n_1}^q$ and $z_1^1$, are encircled by lifts $\beta_{n_1}^a$ of $\beta_{n_1}$.) This yields a diagram $\widetilde{\mathcal D} = (S^2, \widetilde{\boldsymbol{\alpha}}, \widetilde{\boldsymbol{ \beta}}, \widetilde{\bf w}, \widetilde{\bf z})$ with $qn_1$ each of $\alpha$ and $\beta$ curves and $qn_1 +1$ pairs of basepoints.
\begin{remark}
The notation above is not quite the notation of \cite{Hendricks}, in which the two lifts of a curve $\alpha_i$ in a Heegaard surface $\mathcal D$ to its double branched cover $\widetilde{\mathcal D}$ were $\widetilde{\alpha_i}$ and $\tau(\widetilde{\alpha_i})$. In the new slightly more streamlined notation, adopted in view of the need to work with $q$-fold branched covers and to consider multiple lifts of some of the basepoints, these two curves would be $\alpha_i^1$ and $\alpha_i^2$.
\end{remark}
Let us pause to introduce some notation on the diagram $\mathcal D$ that will be useful in Section \ref{Stable Normal Triv Section}. Let $x_i$ be the single positive intersection point between $\alpha_i$ and $\beta_i$ and $y_i$ the negative intersection point. Moreover, let $F_i$ be the closure of the component of $S - \alpha_i - \beta_i$ containing $z_{i}$ and $E_i$ be the closure of the component of $S - \alpha_i -\beta_i$ containing $z_{i+1}$ (or $z_1$ if $i=n_1$). Then $P_i = E_i - F_i$ is a periodic domain of index zero on $\mathcal D$. Finally, let $\gamma_i$ be the union of the arc of $\alpha_i$ running from $x_i$ to $y_i$ and the arc of $\beta_i$ running from $x_i$ to $y_i$. In particular, this specifies that $\gamma_i$ has no intersection with any $\alpha$ or $\beta$ curves other than $\alpha_i$ and $\beta_i$, and moreover the component of $S - \gamma_i$ which does not contain $w_0$ contains only a single basepoint $w_i$.
\medskip
\begin{figure}
\caption{An equivariant Heegaard diagram $\widetilde{\mathcal D}$ for the trefoil together with the unknotted axis, and its quotient Heegaard diagram $\mathcal D$ for the Hopf link.}
\label{Trefoil Heegaard Diagrams Figure}
\subfloat{\centering
\vtop{
\vskip0pt
\hbox{
\begin{tikzpicture}[scale = .7]
\tikzstyle{every node}=[font=\small]
\node(6) at (-4.8,3){$\alpha_1^1$};
\node(7) at (-4.5,0){$\beta_1^1$};
\node(8) at (4, 1.1){$\beta_1^2$};
\node(9) at (4.8, -1.5){$\alpha_1^2$};
\tikzstyle{every node}=[font=\tiny]
\node(1) at (0,.7){$z_0$};
\node(2) at (-4,1){$z_1^1$};
\node(3) at (-1,0){$w_1^1$};
\node(4) at (1,1){$w_1^2$};
\node(5) at (4,0){$z_1^2$};
\path(2)edge(3);
\path(4)edge(5);
\draw (2) ..controls (-4, 6) and (3,6).. (3, .45);
\draw (3,.15) .. controls (3, -3.5) and (-2,-3.5)..(-2,.15);
\draw (-2,.45)..controls (-2,3) and (1,3)..(4);
\draw (3)..controls (-1,-2) and (2,-2)..(2,.55);
\draw (2,.8)..controls (2, 4.5) and (-3,4.5)..(-3,.8);
\draw (-3,.55)..controls (-3,-5) and (4,-5)..(5);
\tikzset{every path/.style={line width = 2 pt}}
\draw [rotate=-18.5][blue][dashed](-2.53,-.32) ellipse (60pt and 23pt);
\draw [rotate=-18.5][cyan][dashed] (2.21,1.27) ellipse (60pt and 23pt);
\draw[red](-4.3,1)..controls (-4.4,6.65) and (3.4,6.4)..(3.3,.5);
\draw[red](3.3,.5)..controls (3.3,-4.2) and (-2.3,-3.9)..(-2.3,.2);
\draw[red](-2.3,.2)..controls (-2.4,3.6) and (1.4,3.6)..(1.3,1);
\draw[red](1.3,1)..controls (1.4,.5) and (.7,.5) ..(.7,1);
\draw[red](.7,1)..controls (.8,2.8) and (-1.7,2.5)..(-1.7,.5);
\draw[red](-1.7,.5)..controls (-1.8,-3.2) and (2.7, -3.2)..(2.7,.4);
\draw[red](2.7,.4)..controls (2.7, 5.6) and (-3.85, 5.8) ..(-3.7,.9);
\draw[red](-3.7,.9)..controls (-3.7,.5) and (-4.3,.5) ..(-4.3,1);
\draw[magenta](4.3,0)..controls (4.4,-5.65) and (-3.4,-5.4)..(-3.3,.65);
\draw[magenta](-3.3,.65)..controls (-3.3,4.9) and (2.45,5.25) ..(2.3,.3);
\draw[magenta](2.3,.3)..controls (2.2,-2.45) and (-1.4,-2.55)..(-1.3,0);
\draw[magenta](-1.3,0)..controls (-1.3,.5) and (-.7,.5)..(-.7,0);
\draw[magenta](-.7,0)..controls (-.8,-1.8) and (1.7,-1.5)..(1.7,.6);
\draw[magenta](1.7,.6)..controls (1.8,4.2) and (-2.7,4.2) ..(-2.7,.6);
\draw[magenta](-2.7,.6)..controls (-2.7,-4.6) and (3.7,-4.85)..(3.7,.2);
\draw[magenta](3.7,.2)..controls (3.7,.5) and (4.3,.5)..(4.3,0);
\end{tikzpicture}}}}
\hspace{1 cm}
\subfloat{\centering
\vtop{
\vskip0pt
\hbox{
\begin{tikzpicture}[scale = .72]
\tikzstyle{every node}=[font=\small]
\node(6) at (-4.7,3){$\alpha_1$};
\node(7) at (-4.5,0){$\beta_1$};
\tikzstyle{every node}=[font=\tiny]
\node(1) at (0,.7){$z_0$};
\node(2) at (-4,1){$w_1$};
\node(3) at (-1,0){$z_1$};
\path(2)edge(3);
\draw(2)..controls(-4,6) and (3,6)..(3,.5);
\draw (3,.5) .. controls (3, -4.5) and (-3,-4.5)..(-3,.55);
\draw (-3,.8)..controls (-3,4.5) and (2,4.5)..(2,.6);
\draw (2,.6)..controls (2,-3) and (-2,-3)..(-2,.25);
\draw (-2,.4)..controls (-2,3.1) and (.85,2.7)..(1,1);
\draw (1,1)..controls(1.3,-1.6) and (-1,-1.8)..(3);
\tikzset{every path/.style={line width = 2 pt}}
\draw [blue][dashed][rotate=-18.5](-2.53,-.32) ellipse (60pt and 23pt);
\draw[red](-4.3,1) ..controls (-4.5,6.5) and (3.4,6.5)..(3.3,.3);
\draw[red](3.3,.3)..controls (3.4, -4.9) and (-3.4,-5)..(-3.3,.7);
\draw[red](-3.3,.7)..controls (-3.4, 5) and (2.4,5)..(2.3, .5);
\draw[red](2.3, .5)..controls (2.4, -3.5) and (-2.4, -3.5)..(-2.3, .4);
\draw[red](-2.3,.4)..controls (-2.4, 3.5) and (1.4, 3.4)..(1.3,.7);
\draw[red](1.3,.7)..controls (1.4,-2.15) and (-1.4, -2.15)..(-1.3,0);
\draw[red](-1.3,0)..controls (-1.4, .5) and (-.7,.5)..(-.7,0);
\draw[red](-.7,0)..controls (-.8,-1.3) and (.8, -1.2)..(.7,.5);
\draw[red](.7,.5)..controls (.8,2.5) and (-1.8, 2.5)..(-1.7,.4);
\draw[red](-1.7,.4)..controls(-1.8,-2.65) and (1.8, -2.65) ..(1.7,.6);
\draw[red](1.7,.6)..controls(1.8,4) and(-2.8,4) ..(-2.7,.5);
\draw[red](-2.7,.5)..controls(-2.8,-4.1) and (2.8,-4.1) ..(2.7,.5);
\draw[red](2.7,.5)..controls (2.85,5.55) and (-3.8,5.75)..(-3.7,.9);
\draw[red](-3.7,.9)..controls (-3.8,.6) and (-4.4,.6) .. (-4.3,1);
\end{tikzpicture}}}}
\end{figure}
Our next goal will be to investigate the behavior of the relative Maslov and (particularly) Alexander gradings of generators of $\widehat{\mathit{CFL}}(\mathcal D)$ and $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$. We begin with two relatively simple lemmas. As before, let $\tau$ be a generator of the $\mathbb Z_q$-action on $(S^3, \widetilde{K})$ (and on $\widetilde{\mathcal D}$). Let $\tau^{\#}$ be the induced map on $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$.
\begin{lemma}
The induced map $\tau^{\#}$ preserves Alexander and Maslov gradings.
\end{lemma}
\begin{proof}
Let ${\bf s} \in \widehat{\mathit{CFL}}(\widetilde{\mathcal D})$. Choose a generator ${\bf x} \in \widehat{\mathit{CFL}}(\mathcal D)$, and let $\widetilde{\bf x} = \pi^{-1}({\bf x})$, such that $\widetilde{\bf x}$ is a generator in $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ which is invariant under $\tau^{\#}$. Choose a Whitney disk $\phi$ in $\pi_2({\bf s},\widetilde{\bf x})$ and let $D$ be its shadow on $\widetilde{\mathcal D}$. Then $\tau \circ \phi$ is a Whitney disk in $\pi_2(\tau({\bf s}),\widetilde{\bf x})$ with shadow $\tau(D)$. Furthermore, since $w_0$ and $z_0$ are fixed by the action, $n_{z_0}(D)=n_{z_0}(\tau(D))$ and $n_{w_0}(D) = n_{w_0}(\tau(D))$, whereas since the remaining basepoints are permuted by the action, we have $\sum_{i=1}^{n_1}\sum_{j=1}^{q} n_{z_i^j}(D) = \sum_{i=1}^{n_1}\sum_{j=1}^{q} n_{z_i^j}(\tau(D))$ and $\sum_{i=1}^{n_1}\sum_{j=1}^{q} n_{w_i^j}(D) = \sum_{i=1}^{n_1}\sum_{j=1}^{q} n_{w_i^j}(\tau(D))$. These equalities imply that $M(\widetilde{\bf x}) - M({\bf s}) = M(\widetilde{\bf x}) - M(\tau^{\#}({\bf s}))$, and similarly for $A_1$ and $A_2$. Therefore ${\bf s}$ and $\tau^{\#}{\bf s}$ are in identical gradings.
\end{proof}
\begin{lemma} \label{Maslov Index Lemma}
Let $\phi \in \pi_2({\bf x},{\bf y})$ be a Whitney disk between generators ${\bf x}$ and ${\bf y}$ in $\widehat{\mathit{CFK}}(\mathcal D)$ with Maslov index $\mu(\phi)$, with shadow the domain $D$ on $\mathcal D$. There is a Whitney disk $\widetilde{\phi} \in \pi_2(\widetilde{{\bf x}},\widetilde{{\bf y}})$ with shadow the domain $\pi^{-1}(D)$ on $\widetilde{\mathcal D}$, and $\mu(\widetilde{\phi}) = q \mu(\phi) - (q-1)(n_{z_0}(\phi) + n_{w_0}(\phi))$.
\end{lemma}
\begin{proof}
The boundary of the lift $\pi^{-1}(D)$ is trivial as a one cycle in $S^3$, implying that $\pi^{-1}(D)$ is the shadow of a Whitney disk $\widetilde{\phi} \in \pi_2(\widetilde{{\bf x}},\widetilde{{\bf y}})$. We will compare the Maslov index of $\phi$ with the Maslov index of $\widetilde{\phi}$ using the formula \ref{Maslov index formula}. As in that formula, we will write $D$ as a sum of the closures of the components of $S^2 - {\boldsymbol \alpha} - {\boldsymbol \beta}$. Say there are $m$ such components, and label them as follows. There are two domains in $S^2 - {\boldsymbol \alpha} -{\boldsymbol \beta}$ which contain a branch point. Let these be $D_1$ containing $z_0$ and $D_2$ containing $w_0$. Let the shadow of $\phi$ be $a_1D_1 + a_2D_2 +\sum_{i=3}^{m} a_iD_i$. Then the Maslov index of $\phi$ is
\[
\mu(\phi) = \sum_i a_i e(D_i) + p_{\bf x}(D) + p_{\bf y}(D)
\]
Let us now consider applying the same formula to $\pi^{-1}(D) = \sum_i a_i \pi^{-1}(D_i)$. For $i\geq 3$, the lift $\pi^{-1}(D_i)$ of $D_i$ consists of $q$ copies of $D_i$, and by additivity of the Euler measure we see that $e(\widetilde{D}_i) = qe(D_i)$. For $i=1,2$, let $2k_i$ be the number of corners of $D_i$. Then $\pi^{-1}(D_i) = \widetilde{D}_i$ is a single component of $S^2 - \widetilde{\boldsymbol \alpha} - \widetilde{\boldsymbol \beta}$ with $2qk_i$ corners and Euler measure $e(\widetilde{D}_i) = 1 - \frac{qk_i}{2} = q(e(D_i)) - (q-1)$. Notice, furthermore, that $p_{\widetilde{\bf x}}(\widetilde{D}) = q p_{\bf x}(D)$ and $p_{\widetilde{\bf y}}(\widetilde{D}) = q p_{\bf y}(D)$. Therefore we compute
\begin{align*}
\mu(\widetilde{\phi}) &= \sum_i a_i e(\pi^{-1}(D_i)) + p_{\widetilde{\bf x}}(\widetilde{D}) + p_{\widetilde{\bf y}}(\widetilde{D}) \\
&= a_1 e(\widetilde{D}_1) + a_2 e(\widetilde{D}_2) + q \sum_{i=3}^m a_i e(D_i) + qp_{\bf x}(D) + q p_{\bf y}(D)\\
&=a_1 (qe(D_1) - (q-1)) + a_2(qe(D_2) - (q-1)) + q \sum_{i=3}^m a_i e(D_i) + qp_{\bf x}(D) + q p_{\bf y}(D)\\
&=q\mu(\phi) - (q-1)(a_1 + a_2).
\end{align*}
Since $a_1+a_2$ is exactly $n_{z_0}(D) + n_{w_0}(D)$, the total algebraic intersection of $\phi$ with the branch points, this proves the result. \end{proof}
We can now construct the relationship between the Alexander gradings of the generators of $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ and $\widehat{\mathit{CFL}}(\mathcal D)$. For the case of a $q$-periodic knot, we will look only at the relative gradings; later in the particular case of a doubly-periodic knot we will fix the absolute gradings using symmetries of link Floer homology. Let $\pi:\widetilde{\mathcal D} \rightarrow \mathcal D$ be the restriction of the branched covering map $(S^3, \widetilde{K} \cup U) \rightarrow (S^3, K\cup U)$. We have the following, which is analogous to \cite[Lemma 3.1]{MR2443111}.
\begin{lemma} \label{Generator Matching Lemma}
Let ${\bf s} \in \widehat{\mathit{CFL}}(\widetilde{\mathcal D})$, thought of as a $qn_1$-tuple of points on $S^2$ with one point on each $\alpha_i^{k}$ and one on each $\beta_i^{k}$. Consider its projection $\pi({\bf s})$ to $qn_1$ points on $\mathcal D$. There is a (not at all canonical) way to write $\pi({\bf s})$ as ${\bf s_1} \cup {\bf s_2} \cup \cdots \cup {\bf s_q}$, a union of generators in $\widehat{\mathit{CFL}}(\mathcal D)$.
\end{lemma}
The proof of this lemma (pointed out by Adam Levine) is an application of the following combinatorial result of Hall \cite{Hall}. Let $A$ be a set, and $\{ A_i\}_{i=1}^m$ be a collection of finite subsets (a version also exists for infinitely many $A_i$). A \textit{system of distinct representatives} is a choice of elements $j_i \in A_i$ for each $i$ such that $j_{i_1} \neq j_{i_2}$ if $i_1 \neq i_2$. Hall's theorem gives conditions under which a system of distinct representatives exists.
\begin{theorem} \cite[Theorem 1]{Hall} Let $\{A_i\}_{i=1}^m$ be finitely many subsets of a set $A$. Then a system of distinct representatives exists if and only if, for any $1\leq s\leq m$ and $1\leq i_1<\cdots<i_s\leq m$, $A_{i_1} \cup \cdots \cup A_{i_s}$ contains at least $s$ elements.
\end{theorem}
Using this, we may prove Lemma \ref{Generator Matching Lemma}.
\begin{proof}[Proof of Lemma \ref{Generator Matching Lemma}] Let $A = \{1,\dots,n_1\}$. For $1 \leq i \leq n_1$, let $A_i$ be a set of integers $j$, with $1\leq j \leq n_1$, such that each $j$ appears once in $A_i$ for every point of $\pi({\bf s}) \cap (\alpha_i \cap \beta_j)$. That is, the sets $A_i$ record how many intersection points on $\alpha_i$ also lie on $\beta_j$. Notice that there are $q$ elements in each $A_i$, and each $j$ appears exactly $q$ times in $\coprod_i A_i$. We claim the sets $A_i$ satisfy the condition of Hall's theorem. For given $1 \leq i_1 < \cdots < i_m \leq n_1$, the disjoint union $\coprod_{k=1}^{m} A_{i_k}$ contains $qm$ elements, and therefore must contain at least $m$ different integers $j$. Therefore $\bigcup_{k=1}^{m} A_{i_k}$ contains at least $m$ elements. Hence we can choose a set of distinct representatives $j_i$ in $A_i$. There is a generator ${\bf s_1}$ consisting of points in $\pi({\bf s})$ on $\alpha_i \cap \beta_{j_i}$. Remove these points from $\pi({\bf s})$ (and the individual $j_i$ from the sets $A_i$, producing new sets $A_i'$) and repeat the argument, now with $q-1$ elements in each $A_i'$ and $q-1$ appearances of each symbol $j$ in $\coprod_i A_i'$. After $q$ repetitions, we have broken ${\bf s}$ into ${\bf s_1}\cup\cdots\cup {\bf s_q}$, where each ${\bf s_i}$ is a generator for $\widehat{\mathit{CFL}}(\mathcal D)$. This choice of partition is not at all unique.
\end{proof}
We can start by determining the relative Alexander gradings of generators of $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ which are invariant under the action of $\mathbb Z_q$ on $\widetilde{\mathcal D}$; that is, exactly those generators which are total lifts of generators in $\widehat{\mathit{CFL}}(\mathcal D)$ under the projection map $\pi$.
\begin{lemma}
Let ${\bf x}, {\bf y} \in \mathbb T_{\boldsymbol \alpha} \cap \mathbb T_{\boldsymbol \beta}$ and $\widetilde{\bf x}, \widetilde{\bf y}$ be their total lifts in $\mathbb T_{\widetilde{\boldsymbol \alpha}} \cap \mathbb T_{\widetilde{\boldsymbol \beta}}$. Then
\begin{align*}
A_1(\widetilde{\bf y}) - A_1(\widetilde{\bf x}) &= q\left(A_1({\bf y}) - A_1({\bf x})\right) \\
A_2(\widetilde{\bf y}) - A_2(\widetilde{\bf x}) &= A_2({\bf y}) - A_2({\bf x}).
\end{align*}
\end{lemma}
\begin{proof}
Let $D$ be a domain from ${\bf x}$ to ${\bf y}$ on $\mathcal D$. Then $\pi^{-1}(D) = \widetilde{D}$ is a domain from $\widetilde{\bf x}$ to $\widetilde{\bf y}$ on $\widetilde{\mathcal D}$. Since $z_0$ and $w_0$ are branch points of $\pi$, we see that $n_{z_0}(\widetilde{D}) = n_{z_0}(D)$ and $n_{w_0}(\widetilde{D}) = n_{w_0}(D)$. Therefore
\begin{align*}
A_2(\widetilde{\bf x}) - A_2(\widetilde{\bf y}) &= n_{z_0}(\widetilde{D}) - n_{w_0}(\widetilde{D}) \\
&=n_{z_0}(D) - n_{w_0}(D) \\
&=A_2({\bf x}) - A_2({\bf y})
\end{align*}
However, for $i\neq 0$, each of the basepoints $z_i$ and $w_i$ on $\mathcal D$ has $q$ preimages in $\widetilde{\mathcal D}$. Moreover $n_{z_i^j}(\widetilde{D}) = n_{z_i}(D)$ for all $1 \leq i \leq n_1$ and $1 \leq j \leq q$, and similarly for $w_i^j$, so we compute as follows.
\begin{align*}
A_1(\widetilde {\bf x}) - A_1(\widetilde{\bf y}) &= \sum_{i=1}^{n_1}\sum_{j=1}^q n_{z_i^j}(\widetilde{D}) - \sum_{i=1}^{n_1}\sum_{j=1}^q n_{w_i^j}(\widetilde{D}) \\
&= q \left( \sum_{i=1}^{n_1} n_{z_i}(D) - \sum_{i=1}^{n_1} n_{w_i}(D)\right) \\
&= q \left( A_1({\bf x}) - A_1({\bf y})\right) \qedhere
\end{align*}
\end{proof}
Finally, we can use this fact to compute the relative Alexander gradings in $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$.
\begin{lemma}
Let ${\bf s}, {\bf r}$ be generators of $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$, whose projections to $\mathcal D$ can be written as $\pi_{*}({\bf s}) = {\bf s_1} \cup \cdots \cup {\bf s_q}$ and $\pi_{*}({\bf r}) = {\bf r_1} \cup \cdots \cup {\bf r_q}$. Then the relative Alexander gradings between ${\bf s}$ and ${\bf r}$ are described by
\begin{align*}
A_1({\bf s}) - A_1({\bf r}) = \sum_{j=1}^{q} \left(A_1({\bf s_j}) - A_1({\bf r_j})\right) \\
A_2({\bf s}) - A_2({\bf r}) = \frac{1}{q} \left(\sum_{j=1}^{q} \left(A_2({\bf s_j}) - A_2({\bf r_j})\right)\right).
\end{align*}
\end{lemma}
\begin{proof}
The proof is quite similar to the first half of the argument of \cite[Proposition 3.4]{MR2443111}. As in Section \ref{Heegaard Floer Background Section}, choose paths $a:[0,1] \rightarrow \mathbb T_{\widetilde{\boldsymbol \alpha}}$, $b:[0,1] \rightarrow \mathbb T_{\widetilde{\boldsymbol \beta}}$, with $\partial a = \partial b = {\bf s} - {\bf r}$. Then let $\epsilon({\bf s},{\bf r}) = a -b $ be a one-cycle representing a class in $H_1(S^3 - (\widetilde{K} \cup U); \mathbb Z)$. Consider the projection $\pi_{*}(\epsilon({\bf s},{\bf r}))$ to $\mathcal D$. The restriction of this one-cycle to any $\alpha$ or $\beta$ curve consists of $q$ possibly overlapping arcs. By adding copies of the $\alpha$ or $\beta$ circle if necessary, we may arrange that these arcs connect a point in ${\bf s_j}$ to a point in ${\bf r_j}$. That is, modulo $\alpha$ and $\beta$ curves, which have linking number zero with the knot, $\pi_{*}\epsilon({\bf s},{\bf r}) \equiv \epsilon({\bf s_1},{\bf r_1}) + \cdots + \epsilon({\bf s_q}, {\bf r_q})$. Notice also that $\ell k\left(\pi_{*}\epsilon({\bf s},{\bf r}), K\right) = \ell k \left(\epsilon({\bf s},{\bf r}), \widetilde{K}\right)$, whereas $\ell k\left(\pi_{*}\epsilon({\bf s},{\bf r}), U\right) = q \ell k \left(\epsilon({\bf s},{\bf r}), U \right)$. Therefore we compute
\begin{align*}
A_1({\bf r}) - A_1({\bf s}) &= \ell k\left(\epsilon({\bf s},{\bf r}),\widetilde{K}\right) \\
&=\ell k(\pi_{*}(\epsilon({\bf s},{\bf r})), K) \\
&=\sum_{j=1}^{q}\ell k \left(\epsilon\left({\bf s_j},{\bf r_j}\right),K \right)\\
&=\sum_{j=1}^{q}\left(A_1({\bf r_j}) - A_1({\bf s_j})\right)\\
\end{align*}
and moreover
\begin{align*}
A_2({\bf r}) - A_2({\bf s}) &= \ell k\left(\epsilon({\bf s},{\bf r}), U\right)\\
&=\frac{1}{q}\ell k\left(\pi_{*}(\epsilon({\bf s},{\bf r})), U\right)\\
&=\frac{1}{q}\sum_{j=1}^{q}\ell k\left(\epsilon({\bf s_j},{\bf r_j}),U \right) \\
&=\frac{1}{q}\sum_{j=1}^{q}\left( A_2({\bf r_j}) - A_2({\bf s_j})\right)\qedhere
\end{align*}
\end{proof}
We may now give a proof of Theorem \ref{Murasugi Theorem}.
\begin{proof}[Proof of Theorem \ref{Murasugi Theorem}]
Let $\widetilde{K}$ be a periodic knot with period $q=p^r$ for some prime $p$, and $K$ be its quotient knot, and let $\lambda = \ell k(\widetilde{K},U) = \ell k(K,U)$. Choose a periodic Heegaard diagram $\widetilde{\mathcal D}$ for $(S^3, \widetilde{K} \cup U)$ and its quotient diagram $\mathcal D$ for $(S^3, K \cup U)$ as outlined above.
\medskip
Consider the Euler characteristic of $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ computed modulo $p$. Let ${\bf s}$ be a generator in $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$. Either ${\bf s} = \widetilde{{\bf y}} = \pi^{-1}(\bf y)$ for some ${\bf y} \in \widehat{\mathit{CFL}}(\mathcal D)$, and thus ${\bf s}$ is invariant under the action of $\tau$, or the order of the orbit of ${\bf s}$ under the action of $\tau$ is a multiple of $p$. Since the action preserves the Alexander and Maslov gradings, modulo $p$ the terms of the Euler characteristic of $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ corresponding to noninvariant generators sum to zero. Moreover, there is a one-to-one correspondence between generators ${\bf y}$ of $\widehat{\mathit{CFL}}(\mathcal D)$ and their total lifts $\pi^{-1}({\bf y}) = {\bf s}$ in $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$.
\medskip
This correspondence preserves relative Alexander $A_2$-gradings, and multiplies Alexander $A_1$-gradings by a factor of $q$. We also claim that it preserves the parity of relative Maslov gradings if $p$ is odd. In particular, any two generators ${\bf x}$ and ${\bf y}$ of $\widehat{\mathit{CFL}}(\mathcal D)$ are joined by a domain $D$ which does not pass over $w_0$. Let $\widetilde{ D}$ be the lift of this domain. Then the Maslov index $\mu(\widetilde{D})$ is equal to $q\mu(D) - (q-1)n_{z_0}(D)$ by Lemma \ref{Maslov Index Lemma}, and we have
\begin{align*}
M(\widetilde{{\bf x}}) - M(\widetilde{{\bf y}}) &= \mu(\widetilde{D}) - 2\sum_{i=1}^{n_1} \left(n_{w_i^1}(\widetilde{D}) + \cdots + n_{w_i^q}(\widetilde{D})\right) \\
&= q\mu(D) -(q-1) n_{z_0}(D) - 2q\sum_{i=1}^{n_1} n_{w_i}(D) \\
&\equiv \mu(D) - 2\sum_{i=1}^{n_1} n_{w_i}(D) \text{ mod } 2\\
&\equiv M({\bf x}) - M({\bf y}) \text{ mod } 2
\end{align*}
Here we have used the assumption that $p$, and therefore $q$, is odd, and $q-1$ is even. Ergo if $p$ is odd, $(-1)^{M(\widetilde{{\bf x}})} = \pm (-1)^{M({\bf x})}$ where the choice of sign is the same for all generators ${\bf x}$ of $\widehat{\mathit{CFL}}(\mathcal D)$. (If $p=2$ the sign is of course immaterial.)
\medskip
Therefore, we capture the following equality.
\begin{align*}
\chi(\widehat{\mathit{CFL}}(\widetilde{\mathcal D})) (t_1, t_2) \doteq \chi(\widehat{\mathit{CFL}}(\mathcal D) )(t_1^q, t_2) \text{ mod } p
\end{align*}
\noindent Here $\doteq$ denotes equivalence up to an overall factor of $\pm t_1^{i_1}t_2^{i_2}$.
\medskip
However, $\chi(\widehat{\mathit{CFL}}(\widetilde{\mathcal D}))$ is $\chi(\widetilde{\mathit{HFL}}(\widetilde{\mathcal D}))$ and similarly $\chi(\widehat{\mathit{CFL}}(\mathcal D))$ is $\chi(\widetilde{\mathit{HFL}}(\mathcal D))$. Hence we may express the Euler characteristics of the chain complexes as the multivariable Alexander polynomials of $\widetilde{L} = \widetilde{K} \cup U$ and $L = K \cup U$ multiplied by appropriate powers of $(1- t_1^{-1})$ and $(1-t_2^{-1})$ according to the number of basepoints on each component of each link. Therefore the equality above reduces to
\begin{align*}
\Delta_{\widetilde{L}}(t_1, t_2)(1-t_1^{-1})^{qn}(1-t_2^{-1}) &\doteq \Delta_{L}(t_1^q, t_2)(1-(t_1^{-1})^{q})^n(1-t_2^{-1}) \text{ mod } p\\
\Delta_{\widetilde{L}}(t_1, t_2)(1-t_1^{-1})^{qn} &\doteq \Delta_{L}(t_1^q, t_2)(1-(t_1^{-1})^{q})^n \text{ mod } p.
\end{align*}
\noindent Recalling that $q$ is a power of $p$, and that therefore $(a+b)^q \equiv a^q + b^q$ mod $p$, we may reduce further.
\begin{align*}
\Delta_{\widetilde{L}}(t_1,t_2)(1-t_1^{-1})^{qn} &\doteq \Delta_{L}(t_1^q, t_2)(1-t_1^{-1})^{qn} \text{ mod } p\\
\Delta_{\widetilde{L}}(t_1,t_2) &\doteq \Delta_{L}(t_1^q, t_2) \text{ mod } p
\end{align*}
\noindent We now set $t_2 = 1$. By Lemma \ref{Two Component Link Lemma}, this reduces the equality above to
\begin{align*}
\Delta_{\widetilde{K}}(t_1)(1 + t_1 + \cdots + t_1^{\lambda-1}) &\doteq \Delta_{K}(t_1^q)\left(1 + t_1^q + (t_1^q)^2 + \cdots + (t_1^q)^{\lambda-1}\right) \text{ mod } p.
\end{align*}
\noindent Again using the fact that $q$ is a power of $p$, we produce
\begin{align*}
\Delta_{\widetilde{K}}(t_1)(1 + t_1 + \cdots + t_1^{\lambda-1}) &\doteq (\Delta_{K}(t_1))^q(1 + t_1 + t_1^2 + \cdots + t_1^{\lambda-1})^q \text{ mod } p \\
\Delta_{\widetilde{K}}(t_1) &\doteq (\Delta_K(t_1))^q (1 + t_1 + \cdots + t_1^{\lambda -1})^{q-1} \text{ mod } p.
\end{align*}
\noindent This last is Murasugi's condition.\end{proof}
\subsection{Spectral sequences for doubly-periodic knots}
From now on we restrict ourselves entirely to the case of $\widetilde{K}$ a doubly-periodic knot. Moreover, we insist that $\widetilde{K}$ be oriented such that $\ell k(\widetilde{K},U) = \lambda$ is positive. Notice also that $\lambda$ is necessarily odd; otherwise $\tilde{K}$ would be disconnected. We proceed to explain how Corollary \ref{Genus Inequality Corollary} follows from Theorem \ref{Knot Floer Homology Spectral Sequence}. Consider the map $\tau^{\#}: \widehat{\mathit{CFL}}(\widetilde{\mathcal D}) \rightarrow \widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ induced by the involution $\tau$ on $\widetilde{\mathcal D}$. As a consequence of our application of Seidel and Smith's localization theory to the symmetric products $\text{Sym}^{2n_1}(S^2 \backslash \{{\bf \widetilde{w},\widetilde{z}}\})$ and $\text{Sym}^{n_1}(S^2 \backslash \{{\bf w,z}\})$, we will see that $\tau^{\#}$ is a chain map for this Heegaard diagram. The spectral sequence of Theorem \ref{Link Floer Homology Spectral Sequence} is induced by the double complex
$$
\xymatrix{
& \ar[d]^-{\partial} & \ar[d]^-{\partial} & \ar[d]^-{\partial} & \\
0 \ar[r] & \widehat{\mathit{CFL}}_{i+1}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial} & \widehat{\mathit{CFL}}_{i+1}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial}& \widehat{\mathit{CFL}}_{i+1}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial} &\\
0 \ar[r] & \widehat{\mathit{CFL}}_{i}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial} & \widehat{\mathit{CFL}}_{i}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial}& \widehat{\mathit{CFL}}_{i}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial} &\\
0 \ar[r] & \widehat{\mathit{CFL}}_{i-1}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial} & \widehat{\mathit{CFL}}_{i-1}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial}& \widehat{\mathit{CFL}}_{i-1}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial}& \\
& & & &
}
$$
\begin{definition}
The homology of the complex $(\widehat{\mathit{CFL}}(\widetilde{\mathcal D}) \otimes \mathbb Z_2[[\theta]], \partial + \theta(1+ \tau^{\#}))$ is $\widehat{\mathit{HFL}}_{\text{borel}}(\widetilde{\mathcal D})$.
\end{definition}
Computing vertical differentials first, the $E^1$ page of this spectral sequence is $\widetilde{\mathit{HFL}}(\widetilde{\mathcal D}) \otimes \mathbb Z_2[[\theta]]$; we will see that after inverting $\theta$, that is, tensoring with $\mathbb Z_2((\theta))$, the $E^{\infty}$ page is $\mathbb Z_2((\theta))$-isomorphic to $\widetilde{\mathit{HFL}}(\mathcal D) \otimes \mathbb Z_2((\theta))$ under a map which preserves both Alexander gradings. Since $\partial$ and $\tau^{\#}$ also preserve both Alexander gradings on $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$, the spectral sequence splits along the grading $(A_1,A_2)$.
\medskip
The knot Floer homology spectral sequence of Theorem \ref{Knot Floer Homology Spectral Sequence} arises from a similar double complex. Recall that for a link $L=K_1\cup\cdots\cup K_\ell$, the differential $\partial_{K_j}$ corresponds to forgetting the component $K_j$ of the link. Therefore for the link $L=\widetilde{K}\cup U$, let $\partial_U$ be the differential corresponding to forgetting the component $U$ of $L$.
$$
\xymatrix{
& \ar[d]^-{\partial_U} & \ar[d]^-{\partial_U} & \ar[d]^-{\partial_U} & \\
0 \ar[r] & \widehat{\mathit{CFL}}_{i+1}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial_U} & \widehat{\mathit{CFL}}_{i+1}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial_U}& \widehat{\mathit{CFL}}_{i+1}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial_U} &\\
0 \ar[r] & \widehat{\mathit{CFL}}_{i}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial_U} & \widehat{\mathit{CFL}}_{i}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial_U}& \widehat{\mathit{CFL}}_{i}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial_U} &\\
0 \ar[r] & \widehat{\mathit{CFL}}_{i-1}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial_U} & \widehat{\mathit{CFL}}_{i-1}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial_U}& \widehat{\mathit{CFL}}_{i-1}(\widetilde{\mathcal D}) \ar[r]^-{1 + \tau^{\#}} \ar[d]^-{\partial_U}& \\
& & & &
}
$$
\begin{definition}
The homology of the complex $(\widehat{\mathit{CFL}}(\widetilde{\mathcal D}) \otimes \mathbb Z_2[[\theta]], \partial_U + \theta(1+ \tau^{\#}))$ is $\widehat{\mathit{HFK}}_{\text{borel}}(\widetilde{\mathcal D})$.
\end{definition}
As $\partial_U$ does not preserve Alexander $A_2$ gradings relative to the axis $U$, neither does the spectral sequence; however, it still splits along Alexander $A_1$ gradings, the grading relative to the knot itself.
\medskip
Let us consider how we might fix the relationship between the absolute Alexander gradings of $\widetilde{\mathit{HFL}}(\widetilde{\mathcal D})$ and $\widetilde{\mathit{HFL}}(\mathcal D)$. Let $x_L([K])$ be the Thurston seminorm of the class dual to $[K]$ in $H^2(S^3 - \nu(L), \partial(S^3 - \nu(L)))$ and $x_{\widetilde{L}}([\widetilde{K}])$ be the Thurston seminorm of the class dual to $[\widetilde{K}]$ in $H^2(S^3 - \nu(\widetilde{L}), \partial(S^3 - \nu(\widetilde{L})))$. Notice that if $F$ is a Thurston-norm minimizing surface for the class dual to $[K]$ of Euler characteristic $\chi(F) = -x_L([K])$, then the preimage $\widetilde{F} = \pi^{-1}(F)$ under the ordinary double cover $\pi: S^3-\nu(\widetilde{L}) \rightarrow S^3-\nu(L)$ is an embedded surface representing the class dual to $[\widetilde{K}]$ in $H^2(S^3 - \nu(\widetilde{L}), \partial(S^3 - \nu(\widetilde{L})))$, and $\chi(\widetilde{F}) = 2\chi(F)$. Hence $x_{\widetilde{L}}([\widetilde{K}]) \leq 2x_L([K])$.
However, recall that $x_{L}([K]) + 1$ is exactly the breadth of the Alexander $A_1$ grading in $\widehat{\mathit{HFL}}(S^3, L)$, and therefore that the breadth of the $A_1$ grading in $\widetilde{\mathit{HFL}}(\mathcal D)$ is $x_{L}([K]) + 1 + (n_1 -1) = x_{L}([K]) + n_1$. Similarly, the breadth of the $A_1$ grading in $\widehat{\mathit{HFL}}(S^3, \widetilde{L})$ is $x_{\widetilde{L}}([\widetilde{K}]) +1$, and therefore the total breadth of the $A_1$ grading in $\widetilde{\mathit{HFL}}(\widetilde{\mathcal D})$ is $x_{\widetilde{L}}([\widetilde{K}]) + 1 + 2n_1 - 1 = x_{\widetilde{L}}([\widetilde{K}]) + 2n_1$. Moreover, we have seen that in the spectral sequence from $E^1 = \widetilde{\mathit{HFL}}(\widetilde{\mathcal D}) \otimes \mathbb Z_2((\theta))$ to $E^{\infty} \cong \widetilde{\mathit{HFL}}(\mathcal D) \otimes \mathbb Z_2((\theta))$, the relative $A_1$ grading of two elements on the $E^1$ page is twice the relative $A_1$ grading of their residues on the $E^{\infty}$ page. Therefore the total breadth of the $A_1$ grading on the $E^1$ page of the spectral sequence is at least twice the breadth of the $A_1$ grading on the last page of the spectral sequence. We thus have the inequality
\begin{align*}
x_{\widetilde{L}}([\widetilde{K}]) + 2n_1 &\geq 2(x_L([K]) + n_1)\\
&= 2x_{L}([K]) + 2n_1, \\
\text{i.e. } x_{\widetilde{L}}([\widetilde{K}])&\geq 2x_L([K]).
\end{align*}
Consequently we see directly from the spectral sequence that
\begin{align*}
x_{\widetilde{L}}([\widetilde{K}]) = 2 x_L([K]).
\end{align*}
\noindent This implies that the breadth of the $A_1$ grading on the $E^1$ page is exactly twice the breadth of the $A_1$ grading on the $E^{\infty}$ page. Therefore the breadth of the $A_1$ grading cannot decrease over the course of the spectral sequence. A similar argument, sans the factors of two, shows that $x_{\widetilde L}([U]) = x_{L}([U])$ and that the breadths of the Alexander $A_2$ grading of the $E^1$ and $E^{\infty}$ pages of the spectral sequence are the same. Therefore the breadth of the $A_2$ grading does not change either over the course of the spectral sequence.
\medskip
In particular, the top $A_1$ grading in $\widetilde{\mathit{HFL}}(\widetilde{\mathcal D})$ is sent to the top $A_1$ grading in $\widetilde{\mathit{HFL}}(\mathcal D)$. However, by the symmetry of $\widehat{\mathit{HFL}}$ and the determination of the breadth of the $A_1$ grading by the Thurston norm, the top $A_1$ grading of $\widetilde{\mathit{HFL}}(\widetilde{\mathcal D})$ is the same as the top $A_1$ grading of $\widehat{\mathit{HFL}}(S^3, \widetilde{L})$, which is $A_1 = \frac{x_{\widetilde{L}}([\widetilde{K}]) +1}{2} = \frac{2x_L([K]) +1}{2}$. Similarly, the top $A_1$ grading of $\widetilde{\mathit{HFL}}(\mathcal D)$ is the top $A_1$ grading of $\widehat{\mathit{HFL}}(S^3, L)$, to wit, $A_1 = \frac{x_L([K]) + 1}{2}$. Therefore the spectral sequence carries the $A_1$ Alexander grading $\frac{2x_L([K]) + 1}{2}$ on the $E^1$ page to the $A_1$ grading $\frac{x_L([K]) + 1}{2}$ on the $E^{\infty}$ page. Therefore in general the $A_1$ grading $\frac{2x_L([K]) + 1}{2} + 2a$ on the $E^1$ page is sent to the $A_1$ grading $\frac{x_L([K]) +1}{2} + a$ for any integer $a$. Notice that $x_L([K])$ is an even number: suppose $F$ is a Thurston-seminorm minimizing surface for $K$ in $S^3-L$ of genus $g'$ with geometric intersection number $\#(F\cap U) =\Lambda$. Since the algebraic intersection number $\lambda$ is odd, so is $\Lambda$. Then $x_L([K]) = 2g'+\Lambda-1$ is even. Therefore $\frac{x_L([K])}{2}$ is an integer, and we can take $a= \frac{x_L([K])}{2}$ to see that the $A_1$ grading $\frac{1}{2}$ on the $E^1$ page is sent to the $A_1$ grading $\frac{1}{2}$ on the $E^{\infty}$ page.
\medskip
A parallel but simpler argument for the $A_2$ gradings shows that the $A_2$ grading $b$ on the $E^1$ page is sent precisely to the $A_2$ grading $b$ on the $E^{\infty}$ page, using the fact that relative $A_2$ gradings of elements on the $E^1$ page that survive in the spectral sequence are preserved rather than doubled on the $E^{\infty}$ page.
\begin{remark} The observations that $x_{\widetilde{L}}([\widetilde{K}]) = 2 x_L([K])$ and $x_{\widetilde L}([U]) = x_{L}([U])$ are a special case of Gabai's theorem \cite[Corollary 6.13]{MR723813} that the Thurston norm is multiplicative for ordinary finite covers. Indeed, by appealing to Gabai's theorem (or by constructing a $\mathbb Z_p$ analog of Seidel and Smith's localization spectral sequence) we could similarly fix the relationship between the absolute gradings of $\widetilde{\mathit{HFL}}(\widetilde{\mathcal D})$ and $\widetilde{\mathit{HFL}}(\mathcal D)$ for $p$-periodic knots.
\end{remark}
\begin{remark}
The reader may wonder at the asymmetry in the construction that causes the $A_1$ gradings $\frac{1}{2}$ and $\frac{1}{2}$ to correspond, but not the gradings $-\frac{1}{2}$ and $-\frac{1}{2}$. This is the result of the convention that the two summands of the vector space $V_1$ have $A_1$ gradings $0$ and $-1$.
\medskip
This serves to illustrate the important role of the vector space $V_1$ in the existence of the spectral sequence; the breadth of the $A_1$ grading in $\widehat{\mathit{HFL}}(S^3, \widetilde{L})$ is one less than twice the breadth of the $A_1$ grading in $\widehat{\mathit{HFL}}(S^3, L)$, a trouble which is corrected for by increasing the breadth upstairs by $2n_1 - 1$ and the breadth downstairs by $n_1 -1$. While one might hope to produce a spectral sequence in which $n_1 =1$, it seems impossible to produce a link Floer homology spectral sequence for doubly periodic knots which does not involve at least one copy of $V_1$ on the $E^1$ page.
\end{remark}
We summarize the discussion above in the following two lemmas.
\begin{lemma} \label{Alexander Gradings Link Lemma}
The spectral sequence from $E^1 = \widetilde{\mathit{HFL}}(\widetilde{\mathcal D}) \otimes \mathbb Z_2((\theta))$ to $E^{\infty} \cong \widetilde{\mathit{HFL}}(\mathcal D) \otimes \mathbb Z_2((\theta))$ splits along Alexander gradings. For $a \in \mathbb Z, b \in \mathbb Z+\frac{1}{2}$, the sequence sends the gradings $(A_1, A_2) = (\frac{1}{2} + 2a, b)$ on the $E^{1}$ page to the gradings $(A_1, A_2) = (\frac{1}{2} + a, b)$ on the $E^{\infty}$ page and kills all other gradings. In particular there is a rank inequality
\begin{align*}
rk\left(\widetilde{\mathit{HFL}}\left(\widetilde{\mathcal D}, \left(\frac{1}{2} +2a, b\right)\right)\right) \geq rk\left(\widetilde{\mathit{HFL}}\left(\mathcal D, \left(\frac{1}{2}+a, b\right)\right)\right).
\end{align*}
\end{lemma}
By Lemma \ref{Gradings Shift Lemma}, computing the knot Floer homology complex using $\partial_U$ yields a downward shift in Alexander gradings by $\frac{\ell k(\widetilde{K}, U)}{2}$ on the $E^1$ page and $\frac{\ell k(K, U)}{2}$ on the $E^{\infty}$ page. Since both of these numbers are $\frac{\lambda}{2}$, we obtain an overall downward shift of $\frac{\lambda}{2}$ between the link and knot Floer homology spectral sequences, leading to the following lemma.
\begin{lemma} \label{Alexander Gradings Knot Lemma}
The spectral sequence from $E^1 = \widetilde{\mathit{HFK}}(\widetilde{\mathcal D}) \otimes \mathbb Z_2((\theta))$ to $E^{\infty} \cong \widetilde{\mathit{HFK}}(\mathcal D) \otimes \mathbb Z_2((\theta))$ splits along the Alexander grading. For $a \in \mathbb Z$, the sequence sends the grading $A_1 = \frac{1 -\lambda}{2} + 2a$ on the $E^{1}$ page to the grading $A_1 = \frac{1-\lambda}{2} + a$ on the $E^{\infty}$ page and kills all other gradings. In particular there is a rank inequality
\begin{align*}
rk\left(\widetilde{\mathit{HFK}}\left(\widetilde{\mathcal D}, 2a + \frac{1 - \lambda}{2}\right)\right) \geq rk\left(\widetilde{\mathit{HFK}}\left(\mathcal D, a + \frac{1 - \lambda}{2}\right)\right).
\end{align*}
\end{lemma}
Having fixed the Alexander gradings in the spectral sequence, we may provide a proof of Corollary \ref{Genus Inequality Corollary} (Edmonds' Condition) from Theorem \ref{Knot Floer Homology Spectral Sequence}.
\begin{proof} [Proof of Corollary \ref{Genus Inequality Corollary}]
Once again, let $V_1$ denote a two-dimensional vector space over $\mathbb F_2$ whose two sets of gradings $(M, (A_1, A_2))$ are $(0,(0,0))$ and $(-1, (-1, 0))$, and likewise let $W$ be a two-dimensional vector space over $\mathbb F_2$ whose two sets of gradings $(M, (A_1,A_2))$ are $(0,(0,0))$ and $(-1,(0, 0))$. By Theorem \ref{Knot Floer Homology Spectral Sequence}, there is a spectral sequence whose $E^1$ page is $\widehat{\mathit{HFK}}(S^3, \widetilde{K}) \otimes V_1^{\otimes 2n_1-1} \otimes W \otimes \mathbb Z_2((\theta))$ and whose $E^{\infty}$ page is $\mathbb Z_2((\theta))$-isomorphic to $\widehat{\mathit{HFK}}(S^3, K) \otimes V_1^{\otimes n_1-1} \otimes W \otimes \mathbb Z_2((\theta))$. Moreover, this spectral sequence splits along the $A_1$ grading, and by Lemma \ref{Alexander Gradings Knot Lemma} the subgroup of the $E^1$ page in $A_1$ grading $\frac{1-\lambda}{2} + 2a$ is carried to the subgroup of the $E^{\infty}$ page in grading $\frac{1-\lambda}{2} +a$. Moreover, the top $A_1$ grading on the $E^1$ page is $g(\widetilde{K})$ and the top $A_1$ grading on the $E^{\infty}$ page is $g(K)$. Since there must be something on the $E^1$ page in an $A_1$ grading which converges to the $A_1$ grading $g(K)$ on the $E^{\infty}$ page, we have the following inequality.
\begin{align*}
g(\widetilde{K}) - \frac{1-\lambda}{2} &\geq 2\left(g(K) - \frac{1 - \lambda}{2}\right) \\
\text{i.e. } g(\widetilde{K}) &\geq 2g(K) + \frac{\lambda - 1}{2} \qedhere
\end{align*}
\end{proof}
Finally, let us prove Corollary \ref{Fiberedness Corollary}.
\begin{proof}[Proof of Corollary \ref{Fiberedness Corollary}]
Suppose Edmonds' condition is sharp, that is, that $g(\widetilde{K}) = 2g(K) + \frac{\lambda - 1}{2}$. Recall that the spectral sequence from $E^1 \cong \widetilde{\mathit{HFK}}(\widetilde{\mathcal D}) \otimes \mathbb Z_2((\theta))$ to $E^{\infty} \cong \widetilde{\mathit{HFK}}(\mathcal D) \otimes \mathbb Z_2((\theta))$ in general sends the Alexander grading $2a + \frac{1-\lambda}{2}$ to the Alexander grading $a+\frac{1-\lambda}{2}$. Therefore it sends the Alexander grading $g(\widetilde{K}) = 2g(K) + \frac{\lambda -1}{2} = 2\left(g(K) + \frac{\lambda-1}{2}\right) + \frac{1-\lambda}{2}$ to the Alexander grading $g(K)$. That is, sharpness of Edmonds' condition exactly says that the top Alexander grading on the $E^1$ page is not killed in the spectral sequence.
\medskip
Suppose now that $\widetilde{K}$ is fibered. Then the top Alexander grading of $\widetilde{\mathit{HFK}}(\widetilde{\mathcal D}) \otimes \mathbb Z_2((\theta)) = (\widehat{\mathit{HFK}}(S^3, \widetilde{K}) \otimes V_1^{\otimes (2n_1-1)}\otimes W) \otimes \mathbb Z_2((\theta))$ has rank two as a $\mathbb Z_2((\theta))$ module. (The knot Floer homology of fibered knots is monic in the top Alexander grading by the forward direction of Lemma \ref{Fiberedness Condition Lemma}, and the factor of $W$ doubles the number of entries in each Alexander grading.) Since this Alexander grading is not killed in the spectral sequence, the top Alexander grading of $\widetilde{\mathit{HFK}}(\mathcal D) \otimes \mathbb Z_2((\theta))=(\widehat{\mathit{HFK}}(S^3, K) \otimes V_1^{\otimes (n_1-1)} \otimes W) \otimes \mathbb Z_2((\theta))$ also has rank two as a $\mathbb Z_2((\theta))$-module. Therefore $K$ is also fibered.\end{proof}
\begin{remark}
The converse of Corollary \ref{Fiberedness Corollary}, that when Edmonds' condition is sharp, the quotient knot $K$ being fibered implies $\widetilde{K}$ is fibered, is false. Consider the following counterexample: the knot $\widetilde{K}=10_{144}$ is doubly periodic with quotient knot $K = 3_1$, the trefoil. The linking number $\lambda = \ell k(\widetilde{K}, U) = 1$. Since $g(10_{144}) = 2$ and $g(3_1)=1$, we see that $g(\widetilde{K}) = 2 = 2g(K) + \frac{\lambda-1}{2}$. Therefore Edmonds' condition is sharp. However, the trefoil is fibered, whereas $10_{144}$ is not.
\end{remark}
\section{Spectral Sequences for Lagrangian Floer Cohomology} \label{Floer Cohomology Section}
Floer cohomology is an invariant for Lagrangian submanifolds in a symplectic manifold introduced by Floer \cite{MR965228, MR933228, MR948771}. Many versions of the theory exist; in this section we briefly introduce Seidel and Smith's setting for Floer cohomology before stating the hypotheses and results of their main theorem for equivariant Floer cohomology. Let $M$ be a manifold equipped with an exact symplectic form $\omega = d\theta$ and a compatible almost complex structure $J$. Let $L_0$ and $L_1$ be two exact Lagrangian submanifolds of $M$. For our purposes we can restrict to the case that $L_0$ and $L_1$ intersect transversely.
\begin{definition}
The Floer chain complex $\mathit{CF}(L_0,L_1)$ is a $\mathbb Z_2$-vector space with generators the finite set of points $L_0 \cap L_1$.
\end{definition}
The differential $d$ on $\mathit{CF}(L_0,L_1)$ counts holomorphic disks whose boundary lies in $L_0 \cup L_1$ which run from $x_-$ to $x_+$. More precisely, we choose ${\bf J} = J_t$ a time-dependent perturbation of $J$ and consider Floer trajectories $u$ of the following form.
\begin{align*}
u: \mathbb R \times [0,1] \rightarrow M \\
u(s,0) \in L_0, u(s,1) \in L_1 \\
\partial_s u + J_t\partial_t(u) =0 \\
\lim_{s\rightarrow \pm \infty} u(s,\cdot) = x_{\pm}
\end{align*}
The moduli space of such maps carries a natural action by $\mathbb R$ corresponding to translation on the coordinate $s$; we let the quotient by this action be $\mathcal M(x_{-}, x_{+})$, and the compactification of this space be $\widehat{\mathcal M}(x_{-}, x_{+})$.
\medskip
We impose one further technical condition on $M$ to ensure both that there are finitely many equivalence classes of holomorphic curves between any two intersection points $x_+,x_{-} \in L_0 \cap L_1$ and that the image of any holomorphic curve $u:\mathbb R \times [0,1] \rightarrow M$ is contained in some compact set in $M$. We say $\phi: M \rightarrow \mathbb R$ is \textit{exhausting} if it is smooth, proper, and bounded below. We consider the one-form $d^{\mathbb C}(\phi) = d\phi \circ J$ and the two-form $\omega_{\phi} = -dd^{\mathbb C}(\phi)$. We say that $\phi$ is \textit{$J$-convex} or \textit{plurisubharmonic} if $\omega_{\phi}$ is compatible with the complex structure on $M$, that is, if $\omega_{\phi}(Jv,Jw) = \omega_{\phi}(v,w)$ and $\omega_{\phi}(v,Jv) > 0$ for all $v,w \in TM$. This ensures that $\omega_{\phi}$ is a symplectic form on $M$. (The term plurisubharmonic indicates that the restriction of $\phi$ to any holomorphic curve in $M$ is subharmonic, hence satisfies the maximum modulus principle.) A noncompact symplectic manifold $M$ with this structure is called \textit{convex at infinity}.
\medskip
We use an index condition to determine which strips $u$ count for the differential. Given any $u
\in \mathcal M (x_-,x_+)$, we can associate to $u$ a Fredholm operator $D_{{\bf J}}u: \mathcal W^1_u \to \mathcal W^0_u$ from $\mathcal W^1_u = \{X \in W^{1,p}(u^*TM) : X(\cdot, 0) \in u^*TL_0, X(\cdot, 1) \in u^*TL_1\}$ to $\mathcal W^0_u$. (Here $p>2$ is a fixed real number.) This operator describes the linearization of Floer's equation, $\partial_s u + J_t(u)\partial_t(u) =0$, near $u$. We say that ${\bf J}$ is \textit{regular} if $D_{{\bf J}} u$ is surjective for all finite energy holomorphic strips $u$.
\begin{definition}
The index of the operator $D_{{\bf J}} u$ is the \textit{Maslov index} of $u$.
\end{definition}
\begin{lemma}
If $M$ is an exact symplectic manifold with a compatible almost complex structure $J$ which is convex at infinity and $L_0,L_1$ are exact Lagrangian submanifolds also convex at infinity, then a generic choice of ${\bf J}$ perturbing $J$ is regular.
\end{lemma}
Floer's original proof of this result \cite[Proposition 2.1]{MR965228} and Oh's revision \cite[Proposition 3.2]{MR1223659} were for compact manifolds, but, as observed in \cite[Section 9.2]{MR2045629} and indeed by Sikorav \cite{Sikorav} in his review of Floer's paper \cite{MR965228}, the proof carries through identically for noncompact manifolds which are convex at infinity. Choose such a generic regular ${\bf J}$. We let $\mathcal M_1(x_{-}, x_{+})$ be the set of classes of trajectories $u$ in $\mathcal M(x_{-},x_{+})$ such that the Fredholm index of $D_{{\bf J}}u$ is $1$.
\begin{lemma}\cite[Lemma 3.2]{MR965228}
If ${\bf J}$ is regular, then $\mathcal M_1(x_{-},x_{+})$ is a smooth, compact 0-manifold such that $\#\widehat{ \mathcal M}_1(x_{-}, x_{+})$ is finite. Moreover, for any $x_-, x_+ \in \mathit{CF}(L_0,L_1)$, the sum
\[
\sum_{x \in \mathit{CF}(L_0,L_1)}\#\widehat{ \mathcal M}_1(x_{-}, x)\#\widehat{ \mathcal M}_1(x, x_{+})
\]
\noindent is zero modulo two.
\end{lemma}
Therefore we make the following definition.
\begin{definition} \label{FloerCohomologyDefn}\cite[Defn 3.2]{MR965228}
The Floer cohomology $\mathit{HF}(L_0,L_1)$ is the homology of $\mathit{CF}(L_0,L_1)$ with respect to the differential
\begin{align}
\delta(x_{-}) = \sum_{x_+ \in \mathit{CF}(L_0,L_1)} \# \widehat{\mathcal M}_1(x_{-},x_{+})x_+ \label{differential}
\end{align}
\noindent with respect to a regular family of almost complex structures ${\bf J}$ perturbing $J$.
\end{definition}
Now suppose that $M$ carries a symplectic involution $\tau$ preserving $(M, L_0, L_1)$ and the forms $\omega$ and $\theta$. Let the submanifold of $M$ fixed by $\tau$ be $M^{\text{inv}}$, and similarly for $L_i^{\text{inv}}$ for $i=0,1$. We can define the Borel (or equivariant) cohomology of $(M, L_0, L_1)$ with respect to this involution. Seidel and Smith give a geometric description of the cochain complexes used to produce equivariant Floer cohomology; we'll content ourselves with an algebraic description, referring the reader to their paper \cite[Section 3]{MR2739000} for further geometric detail. Notice that the usual Floer chain complex $\mathit{CF}(L_0, L_1)$ carries an induced involution $\tau^{\#}$ which takes an intersection point $x \in L_0 \cap L_1$ to the intersection point $\tau(x) \in L_0 \cap L_1$. This map $\tau^{\#}$ is not a chain map with respect to a generic family of complex structures on $M$. However, suppose that we are in the nice case that we can find a suitable family of complex structures $J$ on $M$ such that $\tau^{\#}$ commutes with the differential on $\mathit{CF}(L_0, L_1)$. (Part of Seidel and Smith's use of their technical conditions on the bundle $\Upsilon(M^{\text{inv}})$ is to establish that such a $J$ exists \cite[Lemma 19]{MR2739000}.) Then $\mathit{CF}(L_0, L_1)$ is a chain complex over $\mathbb F_2[\mathbb Z_2] = \mathbb F_2[\tau^{\#}]/\langle (\tau^{\#})^2 = 1\rangle$. Indeed, $(1 + \tau^{\#})^2 = 0$, so there is a chain complex
$$
\xymatrix{
0 \rightarrow \mathit{CF}(L_0, L_1) \ar[r]^-{1 + \tau^{\#}} & \mathit{CF}(L_0, L_1) \ar[r]^-{1 + \tau^{\#}} & \mathit{CF}(L_0, L_1)\cdots
}
$$
\begin{definition}
If $\mathit{CF}(L_0, L_1)$ is the Floer chain complex and $\tau^{\#}$ is a chain map with respect to the complex structure on $M$, $\mathit{HF}_{\text{borel}}(L_0, L_1)$ is the homology of the complex $\mathit{CF}(L_0,L_1) \otimes \mathbb Z_2[[\theta]]$ with respect to the differential $\delta + \theta(1 + \tau^{\#})$.
\end{definition}
Therefore the double complex
\label{doublecomplex}
$$
\xymatrix{
0 \rightarrow \mathit{CF}(L_0,L_1) \ar@(ur,ul)[]_-{\delta} \ar[r]^-{1+\tau^{\#}} & \mathit{CF}(L_0, L_1) \ar[r]^-{1+\tau^{\#}} \ar@(ur,ul)[]_-{\delta} & \mathit{CF}(L_0, L_1)\cdots \ar@(ur,ul)[]_-{\delta}
}
$$
\noindent induces a spectral sequence whose first page is $\mathit{HF}(L_0,L_1) \otimes \mathbb Z_2[[\theta]]$ and which converges to $\mathit{HF}_{\text{borel}}(L_0,L_1)$.
\medskip
We also have the following more algebraic definition of equivariant Floer cohomology. The following lemma is well-known; a proof may be found in \cite[Section 2]{Hendricks}.
\begin{lemma} \label{homspace}
\noindent The equivariant Floer cohomology $\mathit{HF}_{\text{borel}}(L_0,L_1)$ is isomorphic to
\[
\mathrm{Ext}_{\mathbb F_2[\mathbb Z_2]}(\mathit{CF}(L_1,L_0), \mathbb F_2).
\]
\end{lemma}
Here we regard $\mathbb F_2$ as the trivial module over $\mathbb F_2[\mathbb Z_2]$.
\medskip
Seidel and Smith's result concerns the existence of a localization map from $\mathit{HF}_{\text{borel}} (L_0, L_1)$ to $\mathit{HF}(L_0^{\text{inv}}, L_1^{\text{inv}})$, where the second space is the Floer cohomology of the two Lagrangians $L_0^{\text{inv}}$ and $L_1^{\text{inv}}$ in $M^{\text{inv}}$. The main goal is to produce a family of $\tau$-invariant complex structures on $M$ such that, for $u: \mathbb R \times [0,1] \rightarrow M^{\text{inv}}$, the Maslov index of $u$ in $M^{\text{inv}}$ differs from the Maslov index of $u$ in $M$ by a constant.
\medskip
Consider the normal bundle $N(M^{\text{inv}})$ to $M^{\text{inv}}$ in $M$ and its Lagrangian subbundles $N(L_i^{\text{inv}})$ the normal bundles to each $L_i^{\text{inv}}$ in $L_i$. The construction requires one additional degree of freedom, achieved by pulling back the bundle $N(M^{\text{inv}})$ along the projection map $M^{\text{inv}} \times [0,1] \rightarrow M^{\text{inv}}$. Call this pullback $\Upsilon(M^{\text{inv}})$. This bundle is constant with respect to the interval $[0,1]$. Its restriction to each $M^{\text{inv}} \times \{t\}$ is a copy of $N(M^{\text{inv}})$ which will occasionally, by a slight abuse of notation, be called $N(M^{\text{inv}}) \times \{t\}$; similarly, for $i=0,1$ the copy of $N(L_i^{\text{inv}})$ above $L_i^{\text{inv}} \times \{t\}$ will be referred to as $N(L_i^{\text{inv}}) \times \{t\}$.
\medskip
We make a note here of the correspondence between our notation and Seidel and Smith's original usage. Our bundle $\Upsilon(M^{\text{inv}})$ is their $TM^{\text{anti}}$; while our $N(L_0^{\text{inv}}) \times \{0\}$ is their $TL_0^{\text{inv}}$ and our $N(L_1^{\text{inv}}) \times \{1\}$ is their $TL_1^{\text{anti}}$. (The name $TL_1^{\text{anti}}$ is also used for the bundle that we denote $N(L_1^{\text{inv}}) \times \{0\}$, using the obvious isomorphism between the bundles.)
\medskip
\begin{definition} \label{stablenormaltriv} \cite[Defn 18]{MR2739000} A stable normal trivialization of the vector bundle $\Upsilon(M^{\text{inv}})$ over $M^{\text{inv}} \times [0,1]$ consists of the following data.
\begin{itemize}
\item A stable trivialization of unitary vector bundles $\phi: \Upsilon(M^{\text{inv}}) \oplus \mathbb C^{K} \rightarrow \mathbb C^{k_{\text{anti}} + K}$ for some $K$.
\item A Lagrangian subbundle $\Lambda_0 \subset (\Upsilon(M^{\text{inv}}))|_{[0,1] \times L^{\text{inv}}_0}$ such that $\Lambda_0|_{\{0\} \times L^{\text{inv}}_0} = (N(L_0^{\text{inv}})\times \{0\})\oplus \mathbb R^K$ and $\phi(\Lambda_0|_{\{1\} \times L_0^{\text{inv}}}) = \mathbb R^{k_{\text{anti}} + K}$.
\item A Lagrangian subbundle $\Lambda_1 \subset (\Upsilon(M^{\text{inv}}))|_{[0,1] \times L^{\text{inv}}_1}$ such that $\Lambda_1|_{\{0\} \times L^{\text{inv}}_1} = (N(L_1^{\text{inv}})\times\{0\})\oplus \mathbb R^K$ and $\phi(\Lambda_1|_{\{1\} \times L_1^{\text{inv}}}) = i\mathbb R^{k_{\text{anti}} + K}$.
\end{itemize}
\end{definition}
The crucial theorem of \cite{MR2739000}, proved through extensive geometric analysis and comparison with the Morse theoretic case, is as follows.
\begin{theorem} \label{Localization} \cite[Thm 20]{MR2739000} If $\Upsilon(M^{\text{inv}})$ carries a stable normal trivialization, then $\mathit{HF}_{\text{borel}}(L_0,L_1)$ is well-defined and there are localization maps
\[
\Delta^{(m)}: \mathit{HF}_{\text{borel}}(L_0, L_1) \rightarrow \mathit{HF}(L_0^{\text{inv}}, L_1^{\text{inv}})[[\theta]]
\]
\noindent defined for $m \gg 0$ and satisfying $\Delta^{(m+1)} = \theta\Delta^{(m)}$. Moreover, after tensoring over $\mathbb Z_2[[\theta]]$ with $\mathbb Z_2((\theta))$ these maps are isomorphisms.
\end{theorem}
This implies Theorem \ref{SeidelSmith}.
\medskip
We can in fact dispense with the symplectic structure and work on the level of the complex normal bundle $\Upsilon(M^{\text{inv}})$ with its totally real subbundles $NL_0^{\text{inv}} \times \{0\}$ and $J(NL_1^{\text{inv}}) \times \{1\}$. The following lemma is mentioned in \cite[Section 3d]{MR2739000}; a detailed proof is laid out in \cite[Proposition 7.1]{Hendricks}.
\begin{lemma} \label{Nullhomotopy Lemma}
The existence of a stable normal trivialization of $(M, L_0, L_1)$ is implied by the existence of a nullhomotopy of the map
\begin{align*}
(M, L_0, L_1) \rightarrow (BU, BO)
\end{align*}
\noindent which classifies the complex normal bundle $\Upsilon(M^{\text{inv}}) = NM^{\text{inv}} \times [0,1]$ and its totally real subbundles $NL_0^{\text{inv}} \times \{0\}$ over $L_0^{\text{inv}} \times \{0\}$ and $J(NL_1^{\text{inv}}) \times \{1\}$ over $L_1^{\text{inv}} \times \{1\}$.
\end{lemma}
Let $\widetilde{\mathcal D} = (S^2, \widetilde{\boldsymbol \alpha}, \widetilde{\boldsymbol \beta}, \widetilde{\bf w}, \widetilde{\bf z})$ be a multipointed Heegaard diagram for $\widetilde{K}$ defined using the method of Section \ref{Periodic Knots Section}, and $\mathcal D = (S^2, {\boldsymbol \alpha}, {\boldsymbol \beta}, {\bf w}, {\bf z})$ be its quotient under the involution $\tau$. Given $x$ a point on $\mathcal D$, let $x^1,x^2$ be its two lifts to $\widetilde{\mathcal D}$ in some order. There is a natural map
\begin{align*}
\iota:\text{Sym}^{n_1}(S^2) &\rightarrow \text{Sym}^{2n_1}(S^2) \\
(x_1\cdots x_{n_1}) &\mapsto (x_1^1x_1^2\cdots x_{n_1}^1x_{n_1}^2).
\end{align*}
This map is a holomorphic embedding; for a proof, see \cite[Appendix 1]{Hendricks}. Moreover, consider the induced involution on $\text{Sym}^{2n_1}(S^2)$, which through a slight abuse of notation we will also call $\tau$. The fixed set of $\tau$ is exactly our embedded copy of $\text{Sym}^{n_1}(S^2)$; moreover, $\tau$ preserves the two tori $\mathbb T_{\widetilde{\boldsymbol \alpha}}$ and $\mathbb T_{\widetilde{\boldsymbol \beta}}$, with fixed sets $\mathbb T_{\widetilde{\boldsymbol \alpha}}^{\text{inv}} = \mathbb T_{\boldsymbol \alpha}$ and $\mathbb T_{\widetilde{\boldsymbol \beta}}^{\text{inv}} = \mathbb T_{\boldsymbol \beta}$.
\medskip
Perutz has shown that for an arbitrary Heegaard diagram $D = (S, \boldsymbol \alpha, \boldsymbol \beta, {\bf w,z})$, there is a symplectic form $\omega$ on $\text{Sym}^{g+n-1}(S)$ which is compatible with the complex structure induced by a complex structure on $S$, and with respect to which the submanifolds $\mathbb T_{\alpha}$ and $\mathbb T_{\beta}$ are in fact Lagrangian and the various Heegaard Floer homology theories are their Lagrangian Floer cohomologies \cite[Thm 1.2]{MR2509747}. In particular, the knot Floer homology is the Floer cohomology of these two tori in the ambient space $\text{Sym}^{g+n-1}(S \backslash \{{\bf z,w}\})$, where the removal of the basepoints accounts for the restriction that holomorphic curves not be permitted to intersect the submanifolds $V_{w_i}$ and $V_{z_j}$ of the symmetric product.
\begin{proposition}
There is a symplectic structure on $\text{Sym}^{g+n-1}(S^2 \backslash \{{\bf w,z}\})$ with respect to which the submanifolds $\mathbb T_{\boldsymbol \alpha}$ and $\mathbb T_{\boldsymbol \beta}$ are Lagrangian and
\[
\widehat{\mathit{HFL}}(\mathcal D) \cong \widehat{\mathit{HFK}}(S^3,K)\otimes V_1^{\otimes (n_1-1)}\otimes\cdots\otimes V_{\ell}^{\otimes (n_{\ell}-1)} \cong \mathit{HF}(\mathbb T_{\boldsymbol \beta}, \mathbb T_{\boldsymbol \alpha}).
\]
\end{proposition}
This is essentially Theorem 1.2 of \cite{MR2509747}; the proof is identical save for trivial notation adjustments for Heegaard diagrams with multiple basepoints. From now on we will work with this method of computing knot Floer homology; in Section \ref{Geometry Section} we will show that the symplectic form produced by Perutz's construction meets the requirements of Seidel and Smith's theorem.
\medskip
In order to apply Theorem \ref{Localization} to the case of doubly periodic knots, we will work with three different subspaces of $\text{Sym}^{2n_1}(S^2)$ and their fixed sets under the involution $\tau$, as follows.
\begin{align*}
M_0 &= \text{Sym}^{2n_1}(S^2 \backslash \{\widetilde{{\bf w}}\}) &&
M_0^{\text{inv}} = \text{Sym}^{n_1}(S^2 \backslash \{{\bf w}\}) \\
M_1 &= \text{Sym}^{2n_1}(S^2 \backslash \{{\bf \widetilde{w}}, {\bf \widetilde{z}}-z_0\}) &&
M_1^{\text{inv}} = \text{Sym}^{n_1}(S^2 \backslash \{{\bf w}, {\bf z}-z_0 \}) \\
M_2 &= \text{Sym}^{2n_1}(S^2 \backslash \{\widetilde{\bf w}, \widetilde{\bf z}\}) &&
M_2^{\text{inv}}= \text{Sym}^{n_1}(S^2 \backslash \{{\bf w,z}\})
\end{align*}
\noindent In all cases the Lagrangians and their invariant sets under the involution will be as follows.
\begin{align*}
L_0 = \mathbb T_{\widetilde{\boldsymbol \beta}} && L_0^{\text {inv}} = \mathbb T_{\boldsymbol \beta} \\
L_1 = \mathbb T_{\widetilde{\boldsymbol \alpha}} && L_1^{\text {inv}} = \mathbb T_{\boldsymbol \alpha}
\end{align*}
The following is immediate from the definitions.
\begin{lemma}
With respect to our choice of symplectic manifolds $M_i$ for $i=0,1,2$ and Lagrangians $L_0$ and $L_1$, we have the following Floer cohomology groups.
\noindent In $M_1$,
\begin{align*}
\mathit{HF}(L_0,L_1) &= \mathit{HF}(\mathbb T_{\widetilde{\boldsymbol \beta}}, \mathbb T_{\widetilde{\boldsymbol \alpha}}) = \widehat{\mathit{HFL}}(S^3, \widetilde{K} \cup U) \otimes V^{\otimes (2n_1-1)}.\\
\mathit{HF}(L_0^{\text{inv}}, L_1^{\text{inv}})&=\mathit{HF}(\mathbb T_{\boldsymbol \beta}, \mathbb T_{\boldsymbol \alpha}) = \widehat{\mathit{HFL}}(S^3, K \cup U) \otimes V^{\otimes (n_1-1)}.
\end{align*}
\noindent In $M_2$,
\begin{align*}
\mathit{HF}(L_0,L_1)&=\mathit{HF}(\mathbb T_{\widetilde{\boldsymbol \beta}}, \mathbb T_{\widetilde{\boldsymbol \alpha}}) = \widehat{\mathit{HFK}}(S^3, \widetilde{K})\otimes V^{\otimes (2n_1-1)}\otimes W.\\
\mathit{HF}(L_0^{\text{inv}},L_1^{\text{inv}})&=\mathit{HF}(\mathbb T_{\boldsymbol \beta}, \mathbb T_{\boldsymbol \alpha}) = \widehat{\mathit{HFK}}(S^3,K) \otimes V^{\otimes (n_1-1)}\otimes W.
\end{align*}
\end{lemma}
We will prove in Section \ref{Stable Normal Triv Section} that the space $(M_0^{\text{inv}}, L_0^{\text{inv}}, L_1^{\text{inv}})$ carries a stable normal trivialization; this then naturally restricts to a stable normal trivialization of $(M_i^{\text{inv}}, L_0^{\text{inv}}, L_1^{\text{inv}})$ for $i=1,2$. (These manifolds will carry slightly different symplectic structures, but by Lemma \ref{Nullhomotopy Lemma} we need only be concerned with their complex structures, which will be determined by the inclusion $M_i^{\text{inv}} \subset M_0^{\text{inv}}$.) In some moral sense, this is the correct level of generality: the most important feature of our punctured Heegaard diagram is that no periodic domain has nonzero index, and ${\bf w}$ (or ${\bf z}$) is the smallest set of points at which we may puncture $S^2$ and produce a diagram for which this property holds. Moreover, $M_0^{\text{inv}}$ conveniently deformation retracts onto each of $\mathbb T_{\boldsymbol \alpha}$ and $\mathbb T_{\boldsymbol \beta}$, making certain cohomology computations in Sections \ref{Geometry Section} and \ref{Stable Normal Triv Section} cleaner than they might otherwise be.
\medskip
An important note is that the localization map of Theorem \ref{Localization} is entirely constructed by counting pseudoholomorphic disks of varying index and by multiplication and division by powers of $\theta$. In particular, on $M_1^{\text{inv}}$, the localization isomorphism preserves the Alexander multigrading $(A_1,A_2)$ on the $E^{\infty}$ page, because no flowlines can pass over the missing basepoint divisors $V_{z_i}$ and $V_{w_i}$ for $0\leq i\leq n_1$, and on $M_2^{\text{inv}}$, the localization isomorphism preserves the Alexander grading $A_1$ since flowlines can pass over $V_{z_0}$ and $V_{w_0}$ but no other basepoint divisors. In general, however, we should expect that these isomorphisms will not preserve the data of the Maslov grading.
\section{The Geometry of the Symmetric Product} \label{Geometry Section}
It remains to be verified that $M_0^{\text{inv}}$, $M_1^{\text{inv}}$, and $M_2^{\text{inv}}$ satisfy the basic symplectic structural requirements of Seidel and Smith's theory. To wit, we must see that these manifolds can be equipped with triples $(j_i, \omega_i, \theta_i)$ for $i=0,1,2$ such that $\omega_i = d \theta_i$, $\theta_i|_{L_k^{\text{inv}}}$ is exact for $k=0,1$, and moreover the complex structure $j_i$ is convex at infinity.
\medskip
The proof closely follows the methods of \cite[Section 4]{Hendricks}. Give $S^2 \backslash \{w_0\}$ a complex structure $j$ by identifying it with the complex plane (such that $z_0$ lies at zero). This induces a complex structure $\text{Sym}^{n_1}(j)$ on $\text{Sym}^{n_1}(S^2 \backslash \{w_0\})$ which restricts to complex structures $j_i = \text{Sym}^{n_1}(j)|_{M_i^{\text{inv}}}.$ For $i = 0,1,2$, let ${\bf r_i}$ be the finite set of points on $S^2$ such that $M_i = \text{Sym}^{2n_1}(S^2 \backslash \{{\bf r_i}\})$. We will show that $M_i$ is convex at infinity and can be equipped with a symplectic form which is $\tau$-invariant and with respect to which $\mathbb T_{\widetilde{\boldsymbol \alpha}}$ and $\mathbb T_{\widetilde{\boldsymbol \beta}}$ are exact Lagrangians.
\medskip
Let $f_i$ be an exhausting function on $S^2 \backslash \{{\bf r_i}\}$ as follows. Let $x = u+iv$ be a complex number.
\begin{align*}
f_i: S^2 \backslash \{{\bf r_i}\} \rightarrow \mathbb R \\
x \mapsto C_i |x|^2 + \sum_{j=1}^{k_i} \frac{1}{|x-r_j|^2}
\end{align*}
Here $C_i>0$ is a constant determined as follows. The exact symplectic form associated to $f_i$ is
\begin{align*}
\omega_{f_i} = -dd^{\mathbb C}(f_i) = \left(4C_i + \sum_{j=1}^{k_i} \frac{4}{|x-r_j|^4}\right) du\wedge dv.
\end{align*}
The $\alpha$ and $\beta$ curves on $S^2$ are certainly Lagrangian with respect to the symplectic form $\omega_{f_i}$, but we require them to moreover be exact; that is, we wish to ensure that $\int_{\alpha_j^k} -d^{\mathbb C}(f_i) = 0$ for all $\alpha_j^k$ on $S^2 \backslash \{{\bf r_i}\}$. We choose $C_i$ large enough that the integral $\int_{\alpha_j} -d^{\mathbb C}f_i > 0$ for $1\leq j\leq n_1$, and similarly for each $\beta_j$. Then we may isotope the $\alpha$ and $\beta$ curves inward toward the punctures ${\bf w}$ (around which the curves are oriented counterclockwise) until we have a new, isotopic set of curves for which $\int_{\alpha_j} -d^{\mathbb C}f_i = 0 = \int_{\beta_j} -d^{\mathbb C}f_i$. Hence we have arranged that the $\alpha$ and $\beta$ curves are exact Lagrangians with respect to $\omega_{f_i}$.
\medskip
On the product space $(S^2 \backslash \{{\bf r_i}\})^{2n_1}$ there is a corresponding exhausting function defined as follows. Let $p_k$ be the projection of the product to its $k$th factor, and let $\widetilde{f_i} = f_i \circ p_1 + \cdots + f_i \circ p_{2n_1}$. Notice that $\text{Sym}^{2n_1}(j_i)$-convexity, properness, and boundedness of $\widetilde{f_i}$ follow from the corresponding properties of $f_i$. The symplectic form we obtain from $\widetilde{f}_i$ is $\omega_{\widetilde{f}_i} = \omega_{f_i} \otimes 1 \cdots \otimes 1 + 1 \otimes \omega_{f_i} \otimes 1 \otimes\cdots \otimes 1 +\cdots+1\otimes\cdots\otimes \omega_{f_i}$. Therefore any of the $(2n_1)!$ lifts of $\mathbb T_{\widetilde{\boldsymbol \alpha}}$ and $\mathbb T_{\widetilde{\boldsymbol \beta}}$ is an exact Lagrangian submanifold with respect to $\omega_{\widetilde{f_i}}$.
\medskip
Finally, consider the symmetric product $\text{Sym}^{2n_1}(S^2 \backslash \{{\bf r_i}\})$. There is a (possibly singular) continuous function
\begin{align*}
\psi_i: \text{Sym}^{2n_1}(S^2 \backslash \{{\bf r_i}\}) &\rightarrow \mathbb R \\
(x_1\cdots x_{2n_1}) &\mapsto \sum_{\sigma \in S_{2n_1}} \widetilde{f_i}(x_{\sigma(1)},\cdots,x_{\sigma(2n_1)})
\end{align*}
\noindent which is smooth outside a neighborhood of the fat diagonal $\{(x_1\cdots x_{2n_1}) \in \text{Sym}^{2n_1}(S^2 \backslash \{{\bf r_i}\}): x_k = x_j \text{ for some } k\neq j\}$. Perutz observes \cite{MR2509747} that this function is strictly plurisubharmonic in the sense of non-smooth functions, that is, that the two-current $-dd^{\mathbb C}\psi_i$ is strictly positive. This gives us a continuous exhausting function on $\text{Sym}^{2n_1}(S^2 \backslash \{{\bf r_i}\})$. We may apply the following lemma of Richberg \cite{MR0222334}, quoted in \cite[Lemma 3.10]{SteinBook}.
\begin{lemma}
Let $\psi$ be a continuous $J$-convex function on an integrable complex manifold $V$. Then for every positive function $h:V \rightarrow \mathbb R_+$, there exists a smooth $J$-convex function $\psi'$ such that $|\psi(x) - \psi'(x)| < h(x)$. If $\psi$ is already smooth on a neighborhood of a compact subset $A$, then we can achieve $\psi = \psi'$ on $A$.
\end{lemma}
In particular, we may take $h:\text{Sym}^{2n_1}(S^2 \backslash \{{\bf r_i}\})\rightarrow \mathbb R_+$ to be a constant function $h(x) = \epsilon$, and apply the lemma to our map $\psi_i$ and $h$. Then we may produce $\psi_i':\text{Sym}^{2n_1}(S^2 \backslash \{{\bf r_i}\}) \rightarrow \mathbb R$ such that $|\psi_i'(x) - \psi_i(x)|<\epsilon$ and $\psi_i'$ is smooth and $\text{Sym}^{2n_1}(j_i)$-convex. Moreover, since $\psi_i$ is bounded below and proper, $\psi_i'$ is also, since the two real valued functions differ by at most $\epsilon$. Therefore $\psi_i'$ is an exhausting function on $M_i$, and $M_i$ is convex at infinity.
\medskip
We have yet to produce an appropriate symplectic form on $M_i$, which we will do using work of Perutz \cite{MR2509747}. We begin with a definition.
\begin{definition}\cite[Defn 7.3]{MR2509747} Let $X$ be a complex manifold with complex structure $J$. A \emph{K\"ahler cocycle} on X is a collection $(U_k, \phi_k)_{k \in K}$, where $(U_k)_{k \in K}$ is an open cover of $X$ and $\phi_k: U_k \rightarrow \mathbb R$ is an upper semicontinuous function such that
\begin{itemize}
\item $\phi_k$ is strictly plurisubharmonic
\item $\phi_k - \phi_{\ell}$ is pluriharmonic
\end{itemize}
\end{definition}
If a K\"ahler cocycle $(U_k, \phi_k)_{k \in K}$ is smooth then we can associate to it the symplectic form $\omega$ which is $-dd^{\mathbb C}\phi_k$ on each $U_k$. Notice, for example, that a K\"ahler cocycle can consist of a single smooth plurisubharmonic function on all of $X$, as in the case of the smooth K\"ahler cocycle $((S^2 \backslash \{{\bf r_i}\})^{2n_1}, \widetilde{f}_i)$ on $(S^2 \backslash \{{\bf r_i}\})^{2n_1}$ and the singular K\"ahler cocycle $(M_i, \psi_i)$ on $M_i$.
\medskip
Perutz proves the following technical result.
\begin{lemma} \cite[Lemma 7.4]{MR2509747} Let $(U_k, \phi_k)$ be a continuous K\"ahler cocycle on a complex manifold $X$. Suppose that $X = X_1 \cup X_2$ such that $X_1$ and $X_2$ are open and the functions $\phi_k|_{U_k \cap X_1}$ are smooth. Then there exists a continuous function
\[
\chi: X \rightarrow \mathbb R, \text{ } Supp(\chi) \subset X_2
\]
\noindent and a locally finite refinement
\[
V_{\ell} \subset U_{k(\ell)}
\]
\noindent such that the family $(V_{\ell}, \phi_{k(\ell)}|_{V_{\ell}} + \chi|_{V_{\ell}})$ is a smooth K\"ahler cocycle.
\end{lemma}
Notice that if $(U_k, \phi_k)$ happened to be the K\"ahler cocycle associated to a single $J$-convex function $\phi$ on $X$, then $(V_{\ell}, \phi_{k(\ell)}|_{V_{\ell}} + \chi|_{V_{\ell}})$ is the K\"ahler cocycle associated to the smooth plurisubharmonic function $\phi + \chi$.
\medskip
In our case we take $X$ to be $\text{Sym}^{2n_1}(S^2 \backslash \{{\bf r_i}\})$, $X_1$ to be the complement of the main diagonal in this symmetric product, and $X_2$ to be a small neighborhood of the main diagonal with no intersection with $\mathbb T_{\widetilde{\boldsymbol \alpha}}$ and $\mathbb T_{\widetilde{\boldsymbol \beta}}$. Then the function $\psi_i: \text{Sym}^{2n_1}(S^2 \backslash \{{\bf r_i}\}) \rightarrow \mathbb R$ admits a smoothing to a $\text{Sym}^{2n_1}(j_i)$-convex function $\psi_i + \chi_i: \text{Sym}^{2n_1}(S^2 \backslash \{{\bf r_i}\}) \rightarrow \mathbb R$ which is equal to $\psi_i$ away from a neighborhood of the large diagonal. The symplectic form $\omega_{\psi_i+ \chi_i}$ is exact and compatible with the complex structure on $M_i$.
\medskip
Finally, on $\mathbb T_{\widetilde{\boldsymbol \alpha}}$ the map $\chi_i$ is identically $0$ and $\psi_i = (2n_1)!\widetilde{f}_i|_{\alpha^1_1 \times \alpha^2_1 \times\cdots \times \alpha^1_{n_1} \times \alpha^2_{n_1}}$. Therefore $\omega_{\psi_i+ \chi_i}|_{\mathbb T_{\widetilde{\boldsymbol \alpha}}} = 0$ and $d^{\mathbb C}(\psi_i + \chi_i)|_{\mathbb T_{\widetilde{\boldsymbol \alpha}}} = (2n_1)!d^{\mathbb C}(\widetilde{f_i})|_{\alpha^1_1 \times\cdots\times \alpha^2_{n_1}}$ is exact. Ergo $\mathbb T_{\widetilde{\boldsymbol \alpha}}$ is an exact Lagrangian in the exact symplectic manifold $M_i$, and similarly $\mathbb T_{\widetilde{\boldsymbol \beta}}$ is as well.
\medskip
The reader may at this point be alarmed that we have failed to check that $\tau$ is a symplectic involution. We amend this by replacing $\omega_{\psi_i+\chi_i}$ by $\omega_i = \frac{1}{2}(\omega_{\psi_i+\chi_i} + \tau^*\omega_{\psi_i+\chi_i})$. Since $\tau$ is a holomorphic involution, the exact form $\omega_i$ is $\text{Sym}^{2n_1}(j)$-compatible and nondegenerate. Moreover, since $\tau$ preserves $\mathbb T_{\widetilde{\boldsymbol \alpha}}$ and $\mathbb T_{\widetilde{\boldsymbol \beta}}$, it is still the case that these two submanifolds are exact Lagrangians with respect to $\omega_i$. This $\omega_i$ is our final choice of symplectic form on $M_i$.
\medskip
Let us now proceed to discuss the homotopy type and cohomology of $M_0^{\text{inv}}$, building up the structure we will use in the proof that this manifold satisfies the complex conditions which imply the existence of a stable normal trivialization.
\medskip
We claim $M_0^{\text{inv}}$ deformation retracts onto each of $\mathbb T_{\boldsymbol \alpha}$ and $\mathbb T_{\boldsymbol \beta}$. To check this, we refer to a lemma whose proof is outlined in \cite[Lemma 5.1]{Hendricks} following an argument of Ong \cite{MR1993792}.
\begin{lemma}
The $r$th symmetric product of a wedge of $k$ circles $\vee_{i=1}^k S^1_i$ deformation retracts onto the $r$-skeleton of the $k$-torus $\prod_{i=1}^k S^1_i$, where each circle is given a CW structure consisting of the wedge point and a single one-cell, and the torus has the natural product CW structure.
\end{lemma}
We will apply this observation to $M_0^{\text{inv}}$. In $S^2$, let $\nu_{i}:[0,1] \rightarrow S^2$ be a small closed curve around $w_i$ for $1 \leq i \leq n_1$, such that $\nu_i$ is oriented counterclockwise in the complement of $w_{0}$. Then
\begin{align*}
H_1(S^2 \backslash \{{\bf w}\}) &= \mathbb Z\langle \nu_1,\cdots,\nu_{n_1} \rangle.
\end{align*}
Now $S^2\backslash \{{\bf w}\}$ deformation retracts onto a wedge of $n_1$ circles $\vee_{i=1}^{n_1} \nu'_i$, where $\nu_i'$ is a closed curve homotopic to $\nu_i$ which passes once through the origin. Therefore $M_0^{\text{inv}} =\text{Sym}^{n_1}(S^2 \backslash \{{\bf w}\})$ deformation retracts onto the symmetric product of $\vee_{i=1}^{n_1} \nu'_i$, which in turn deformation retracts onto the product $\prod_{i=1}^{n_1} \nu'_i$. However, this product is homotopy equivalent to $\prod_{i=1}^{n_1} \nu_i$, and indeed to $\prod_{i=1}^{n_1} \alpha_i$ and to $\prod_{i=1}^{n_1} \beta_i$. We conclude that $M_0^{\text{inv}}$ has the homotopy type of an $n_1$-torus and in particular admits a deformation retraction onto each of $\mathbb T_{\boldsymbol \alpha}$ and $\mathbb T_{\boldsymbol \beta}$.
\medskip
We will require a concrete description of the cohomology rings of $M_0^{\text{inv}}$, $\mathbb T_{\boldsymbol \alpha}$, and $\mathbb T_{\boldsymbol \beta}$ for the computations of Section \ref{Stable Normal Triv Section}, so we pause to supply one now. Consider the one cycles
\begin{align*}
\overline{\nu_i}: [0,1] \rightarrow M^{\text{inv}} \\
t \mapsto (\nu_i(t)x_0\cdots x_0)
\end{align*}
\noindent where $x_0$ is any choice of basepoint. The $[\overline{\nu_i}]$ form a basis for $H_1(M^{\text{inv}})$. Ergo, letting $\widehat{[\overline{\nu_i}]}$ denote the dual of $\overline{\nu_i}$, we have
\begin{align*}
H^1(M^{\text{inv}}) &= \mathbb Z \langle \widehat{[\overline{\nu_1}]},\cdots,\widehat{[\overline{\nu_{n_1}}]} \rangle \\
H^k(M^{\text{inv}}) &= \textstyle{\bigwedge^k}H^1(M^{\text{inv}})
\end{align*}
\medskip
Similarly, we can write down the homology of the tori $\mathbb T_{\boldsymbol \alpha}$ and $\mathbb T_{\boldsymbol \beta}$. Through a slight abuse of notation, let us insist that we have parametrizations $\alpha_i:[0,1] \rightarrow S^2$ running counterclockwise in $S^2 \backslash \{w_0\}$ and $\beta_i$ running clockwise. The first homology of $\mathbb T_{\boldsymbol \alpha}$ is generated by one-cycles
\begin{align*}
\overline{\alpha_i}: [0,1] &\rightarrow \alpha_1 \times \cdots \times \alpha_{n_1} \\
t &\mapsto (y_1,\cdots,y_{i-1}, \alpha_i(t),y_{i+1},\cdots,y_{n_1}) \\
\end{align*}
\noindent where $y_j = \alpha_j(0)$, and thus the cohomology of this torus is
\begin{align*}
H^1(\mathbb T_{\boldsymbol \alpha}) &= \mathbb Z\langle \widehat{[\overline {\alpha_1}]},\cdots,\widehat{[\overline{ \alpha_{n_1}}]} \rangle \\
H^k(\mathbb T_{\boldsymbol \alpha}) &= \textstyle{\bigwedge^k}H^1(\mathbb T_{\boldsymbol \alpha})
\end{align*}
\noindent We apply analogous naming conventions to $\mathbb T_{\boldsymbol \beta}$, obtaining
\begin{align*}
H^1(\mathbb T_{\boldsymbol \beta}) &= \mathbb Z\langle \widehat{[\overline {\beta_1}]},\cdots,\widehat{[\overline {\beta_{n_1}}]} \rangle \\
H^k(\mathbb T_{\boldsymbol \beta}) &= \textstyle{\bigwedge^k}H^1(\mathbb T_{\boldsymbol \beta})
\end{align*}
Moreover, observe that under the map on homology induced by inclusion $\iota: X \hookrightarrow M^{\text{inv}} \times[0,1]$, where $X = (\mathbb T_{\boldsymbol \beta} \times \{0\}) \cup (\mathbb T_{\boldsymbol \alpha} \times \{1\})$, both $[\overline{\alpha_i}]$ and $[\overline{\beta_i}]$ are sent to $[\overline{\nu_i}]$. Therefore the map $\iota^*$ on cohomology $H^k(M^{\text{inv}} \times [0,1]) \rightarrow H^k(X)$ is precisely the diagonal map, with
\[
\textstyle{\bigwedge_{j=1}^m} \widehat{[\overline{\nu_{i_j}}]} \mapsto \textstyle{\bigwedge_{j=1}^m} \widehat{[\overline{\alpha_{i_j}}]} + \textstyle{\bigwedge_{j=1}^m} \widehat{[\overline{\beta_{i_j}}]}
\]
\begin{corollary}
The relative cohomology $H^*(M_0^{\text{inv}} \times [0,1], X)$ is the cohomology of the torus $(S^1)^{n_1}$, and in particular is torsion-free.
\end{corollary}
\begin{proof}
Consider the long exact sequence
\begin{equation*}
\xymatrix{
\cdots H^{m-1}(X) \ar[r] & H^m(M_0^{\text{inv}} \times[0,1], X) \ar[r]^-{q^*} & H^m(M_0^{\text{inv}}) \ar[r]^-{\iota^*} & H^m(X)\cdots
}
\end{equation*}
Taking into account that the obvious isomorphism between $H^*(\mathbb T_{\boldsymbol \alpha})$ and $H^*(\mathbb T_{\boldsymbol \beta})$ respects our labelling of the cohomology classes, $\iota^*$ is the diagonal map, and in particular an injection. When $m\geq 2$ we have the following short exact sequences.
\begin{equation*}
\xymatrix @R-.4cm{
0 \ar[r]& H^{m-1}(M_0^{\text{inv}} \times [0,1]) \ar[r]^-{\iota^*}\ar[r] &H^{m-1}(X) \ar[r] & H^m(M_0^{\text{inv}} \times [0,1], X) \ar[r] & 0 \\
& \textstyle{\bigwedge_{j=1}^{m-1}} \widehat{[\overline{\nu_{i_j}}]} \ar@{|->}[r] & \textstyle{\bigwedge_{j=1}^{m-1}} \widehat{[\overline{\alpha_{i_j}}]} + \textstyle{\bigwedge_{j=1}^{m-1}} \widehat{[\overline{\beta_{i_j}}]} & &
}
\end{equation*}
\noindent Ergo for $m \geq 2$, $H^m(M_0^{\text{inv}} \times [0,1], X) \cong H^{m-1}(\mathbb T_{\boldsymbol \alpha}) \cong H^{m-1}(\mathbb T_{\boldsymbol \beta})$. (We will have occasion to be careful about the generators in Section \ref{Stable Normal Triv Section}.) The same result follows for $H^1(M_0^{\text{inv}} \times [0,1], X)$ trivially.
\end{proof}
Let us consider the implications of this result for the relative $K$-theory of $(M^{\text{inv}} \times [0,1], X)$, which is isomorphic to the reduced $K$-theory $\widetilde{K}((M^{\text{inv}} \times [0,1])/ X)$. (For a review of $K$-theory, see \cite[Chapter 2]{HatcherKTheory}; all of the facts used in this paper are also summarized in \cite[Section 6]{Hendricks}.) Notice that $(M^{\text{inv}} \times [0,1], X)$ deformation retracts onto the compact CW pair $(\text{Sym}^{n_1}(S^2 \backslash \cup_i \nu(w_i)) \times [0,1], X)$, where $\nu(w_i)$ is a small open neighborhood. This deformation retraction makes it legitimate to consider the reduced $K$-theory of the pair by identifying it with $\widetilde{K}((\text{Sym}^{n_1}(S^2 \backslash \cup_i \nu(w_i)) \times [0,1])/ X)$. From now on we will apply this trick without mention.
\medskip
Recall that there is a rational ring isomorphism
\begin{align*}
\widetilde{\text{ch}}:(\widetilde{K}^0(X) \oplus \widetilde{K}^1(X))\otimes \mathbb Q \rightarrow \widetilde{H}^*(X; \mathbb Q)
\end{align*}
\noindent between the rational reduced $K$-theory and rational reduced cohomology of any space $X$ chosen such that if $V$ is a line bundle over $X$ and $c_1(V)$ is the first Chern class of $V$, $\widetilde{\text{ch}}(V) = \sum_{i=1}^{\infty} \frac{c_1(V)^i}{i!}$ and $\widetilde{\text{ch}}$ is a ring homomorphism. Using this isomorphism and the Atiyah-Hirzebruch spectral sequence, Atiyah and Hirzebruch have shown that if the reduced cohomology $\widetilde{H}^*(X)$ is torsion-free, the reduced $K$-theory $\widetilde{K}^*(X)$ is as well, and the stable isomorphism class of a vector bundle is determined entirely by its Chern classes \cite[Section 2.5]{MR0139181}. In particular, since $H^*(M^{\text{inv}} \times [0,1], X) = \widetilde{H}^*((M^{\text{inv}} \times [0,1])/ X)$ is torsion-free, the stable isomorphism class of a complex vector bundle over $(M_0^{\text{inv}}, X)$ -- that is, a bundle whose restriction to $X$ is stably trivial -- is entirely determined by its Chern classes. To show such a bundle is stably trivial, it suffices to show that all its Chern classes are zero.
\section{Stable Normal Triviality of the Normal Bundle} \label{Stable Normal Triv Section}
Having seen that the symplectic geometry conditions of Seidel and Smith's theory are satisfied for $(M_i, L_0, L_1)$, when $i=0,1,2$, we now proceed to check that $(M_0, L_0, L_1)$ fulfills the complex conditions that, by Lemma \ref{Nullhomotopy Lemma}, imply the existence of a stable normal trivialization.
\begin{proposition} \label{Stable Normal Trivialization}
Consider the complex manifold $M_0=\text{Sym}^{2n_1}(S^2 \backslash \{\widetilde{{\bf w}}\})$ together with its totally real submanifolds $L_0 = \mathbb T_{\widetilde{\boldsymbol \beta}}$ and $L_1= \mathbb T_{\widetilde{\boldsymbol \alpha}}$, and the holomorphic involution $\tau$ which preserves $L_0$ and $L_1$. The map
\begin{align*}
(M_0^{\text{inv}} \times [0,1], (L_0 \times \{0\}) \cup (L_1 \times \{1\})) \rightarrow (BU, BO)
\end{align*}
\noindent which classifies the pullback $\Upsilon(M_0^{\text{inv}}) = N(M_0^{\text{inv}}) \times [0,1]$ of the complex normal bundle of $M_0^{\text{inv}}$ together with the totally real subbundles $NL_0^{\text{inv}} \times\{0\}$ over $L_0^{\text{inv}} \times \{0\}$ and $J(NL_1^{\text{inv}}) \times \{1\}$ over $L_1^{\text{inv}} \times \{1\}$ is nullhomotopic.
\end{proposition}
As a first step, we must establish the complex triviality of $NM_0^{\text{inv}}$ (and thus of $\Upsilon(M_0^{\text{inv}})$) and the real triviality of $NL_i^{\text{inv}}$ for $i=0,1$.
\begin{lemma}
The complex bundle $NM_0^{\text{inv}}$ is stably trivial.
\end{lemma}
\begin{proof}
The inclusion map $\iota_1: (S^2 \backslash \{{\bf w}\}) \hookrightarrow S^2$ is nullhomotopic. Therefore the induced inclusion $\text{Sym}^{n_1}(\iota_1):\text{Sym}^{n_1}(S^2 \backslash
\{{\bf w}\}) \hookrightarrow \text{Sym}^{n_1}(S^2)$ is also nullhomotopic. The normal bundle of $M_0^{\text{inv}} = \text{Sym}^{n_1}(S^2 \backslash \{{\bf w}\})$ in $M_0 = \text{Sym}^{2n_1}(S^2 \backslash \{\widetilde{{\bf w}}\})$ is exactly the restriction of the normal bundle
to $\text{Sym}^{n_1}(S^2)$ in $\text{Sym}^{2n_1}(S^2)$ along the inclusion map $\text{Sym}^{n_1}(\iota_1)$. As the map $\text{Sym}^{n_1}(\iota_1)$ is nullhomotopic, $NM_0^{\text{inv}}$ is stably trivial.\end{proof}
The proof of the next lemma proceeds exactly as in \cite[Lemma 7.3]{Hendricks}.
\begin{lemma} \label{Torus Triviality Lemma}
The normal bundles of $\mathbb T_{\boldsymbol \alpha} \subset \mathbb T_{\widetilde{\boldsymbol \alpha}}$ and $\mathbb T_{\boldsymbol \beta} \subset \mathbb T_{\widetilde{\boldsymbol \beta}}$ are trivial.
\end{lemma}
\medskip
We now turn to the question of relative triviality. Let $X = (L_0 \times \{0\}) \cup (L_1 \times \{1\})$ as in Section \ref{Geometry Section}. Choose preferred trivializations of the totally real bundles $NL_0^{\text{inv}} \times \{0\}$ and $J(NL_1^{\text{inv}}) \times \{1\}$ and tensor with $\mathbb C$ to extend to a preferred trivialization of the complex bundle $\Upsilon(M_0^{\text{inv}})|_{X}$. We use this trivialization to pull back $[\Upsilon(M_0^{\text{inv}})] \in \widetilde{K}^0(M_0^{\text{inv}} \times [0,1])$ to a relative bundle $[\Upsilon(M_0^{\text{inv}})]_{\text{rel}} \in \widetilde{K}^0((M_0^{\text{inv}} \times [0,1])/X)$. Because the reduced cohomology, and therefore the reduced $K$-theory, of $(M_0^{\text{inv}}\times [0,1])/X$ have no torsion, to verify triviality of the relative bundle, it suffices to show that the Chern classes of $[\Upsilon(M_0^{\text{inv}})]_{\text{rel}}$ are trivial.
\begin{remark}
It may be helpful to draw attention to a minor problem of notation: in earlier sections, $\widetilde{K}$ is a doubly periodic knot, but also in the present section $\widetilde{K}^0(B)$ is the reduced $K$-theory of the topological space $B$. We hope this will not occasion confusion.
\end{remark}
Fundamentally, the argument for relative triviality rests on the fact that each of the $n_1$ linearly independent periodic domains in $S^2 \backslash \{{\bf w}\}$ has Maslov index zero. Therefore, we pause here to recall the notation of Section \ref{Periodic Knots Section}. Let $x_i$ be the single positive intersection point in $\alpha_i \cap \beta_i$ and $y_i$ the negative intersection point. Let $F_i$ be the closure of the component of $S^2 - \alpha_i - \beta_i$ containing $z_{i}$ and $E_i$ be the closure of the component of $S^2 - \alpha_i -\beta_i$ containing $z_{i+1}$ (or $z_1$ if $i=n_1$). Then $P_i = E_i - F_i$ is a periodic domain of index zero on $\mathcal D$ with boundary $\beta_i - \alpha_i$. Finally, let $\gamma_i$ be the union of the arc of $\alpha_i$ running from $x_i$ to $y_i$ and the arc of $\beta_i$ running from $x_i$ to $y_i$. In particular, this specifies that $\gamma_i$ has no intersection with any $\alpha$ or $\beta$ curves other than $\alpha_i$ and $\beta_i$, and moreover the component of $S^2 - \gamma_i$ which does not contain $w_0$ contains only a single basepoint $w_i$. See Figure \ref{Periodic Domains Figure} for an illustration of the domain $P_i$.
\begin{figure}
\caption{The periodic domain $P_i = E_i - F_i$ has Maslov index zero.}
\label{Periodic Domains Figure}
\begin{tikzpicture}
\def \betacurve {(1.25,0)ellipse (54 pt and 24 pt)};
\def \alphacurve {(-1.25,0)ellipse (54 pt and 24 pt)};
\node(1)[font = \small] at (0,0) {${w_i}$};
\node(2)[font=\small] at (-2.5,0){${z_i}$};
\node(3)[font=\small] at (2.5,0){${z_{i+1}}$};
\node(4) at (-1.5,0){$F_i$};
\node(5) at (1.5,0){$E_i$};
\node(6) at (-2,1.1){$\alpha_i$};
\node(7) at (2,1.1){$\beta_i$};
\node(9) at (0,1)[font = \small]{$x_i$};
\draw [red][line width = 2 pt] (-1.25,0)ellipse (55 pt and 25 pt);
\draw [blue, dashed][line width = 2 pt](1.25,0)ellipse (55 pt and 25 pt);
\begin{scope}[even odd rule]
\clip \alphacurve \betacurve;
\fill[gray, opacity = .5] \betacurve;
\fill[gray, opacity = .5] \alphacurve;
\end{scope}
\end{tikzpicture}
\end{figure}
\medskip
The structure of the argument is as follows: for each even $k$ such that $1 \leq k \leq 2n_1$, we will use the periodic domains $P_i$ to produce a set of $k$-chains $\{W_{\bf I}\}$ in $(M_0^{\text{inv}} \times [0,1], X)$ whose relative homology classes generate the $k$th relative homology $H_k(M_0^{\text{inv}} \times[0,1], X)$. We will then show that the restriction of $\Upsilon(M^{\text{inv}}_0)|_{\text{rel}}$ to each $W_{\bf I}$ is trivial as a relative vector bundle, and that therefore $\langle c_k(\Upsilon(M_0^{\text{inv}})|_{\text{rel}}), [W_{\bf I}] \rangle=0$. Since the $[W_{\bf I}]$ generate $H_k(M_0^{\text{inv}}\times[0,1], X)$, we will have proven that $c_k(\Upsilon(M_0^{\text{inv}})|_{\text{rel}})$ is identically zero.
\medskip
More specifically, we will describe the chains $W_{\bf I}$ as a subset of a product of two-chains $Y_i$ in $(S^2 \backslash \{{\bf w}\}) \times [0,1]$ such that the projection to $S^2 \backslash \{{\bf w}\}$ is the periodic domain $P_i$ and the $Y_i$ are pairwise disjoint. We will then show that the restriction of the bundle $\Upsilon(M^{\text{inv}}_0)|_{\text{rel}}$ to $W_{\bf I}$ also breaks up as the restriction of a product of relative bundles $\Upsilon(Y_i)$ over the $Y_i$ which are known to be trivial through Maslov index arguments. Let us begin by constructing these manifolds $Y_i$.
\medskip
For $1 \leq i \leq n_1$, let $Y_i$ be a subspace of $S^2 \times [0,1]$ with the following properties:
\medskip
\begin{itemize}
\item $Y_i$ is topologically $S^1 \times [0,1]$, and $Y_i \cap (S^2 \times \{t\})$ is $S^1$ for all $t$.
\item The boundary of $Y_i$ is $\beta_i \times \{0\} - \alpha_i \times \{1\}$.
\item The projection of $Y_i$ to the punctured sphere is a copy of the periodic domain $P_i$.
\item $Y_i \cap Y_j = \emptyset$ if $i \neq j$.
\item If $x_i$ is the point of positive intersection of $\alpha_i$ and $\beta_i$, $\{x_i\} \times [0,1] \subset Y_i$.
\end{itemize}
\medskip
The 2-chains $Y_i$ are constructed as follows: for $t \in [0, \frac{1}{2}]$, choose a linear homotopy $H_t$ from $\beta_i$ to $\gamma_i$ inside the domain $E_i$ which fixes $\beta_i \cap \gamma_i$. Let the intersection of $Y_i$ with $S^2 \times \{t\}$ be the embedded circle $H_t(\beta_i) \times \{t\}$. Similarly, for $t \in [\frac{1}{2}, 1]$, choose a linear homotopy $J_t$ from $\gamma_i$ to $\alpha_i$ inside the domain $F_i$ which fixes $\alpha_i \cap \gamma_i$, and let the intersection of $Y_i$ with $S^2 \times \{t\}$ be $J_t(\gamma_i)$.
\medskip
Observe that this description of $Y_i$ has the following properties: first, $Y_i$ is contained in $(S^2 \backslash \{{\bf w}\}) \times [0,1]$ as promised. Second, $Y_i$ contains the line segment $\{x_i\} \times [0,1]$. Third, the intersection of $Y_i$ with $(\alpha_i \cup \beta_i)\times (0,1)$ is entirely contained in the cylinder $\gamma_i \times (0,1)$. Finally, the projection of $Y_i \cap (S^2 \times [0, \frac{1}{2}])$ to $S^2$ lies entirely ``inside'' $\beta_i$ - that is, on the component of $S^2 \backslash \beta_i$ not containing $w_0$ - implying that the sets $Y_i \cap (S^2 \times [0, \frac{1}{2}])$ are pairwise disjoint. Similarly, the projection of $Y_i \cap (S^2 \times [\frac{1}{2},1])$ to $S^2$ lies entirely ``inside'' $\alpha_i$, and therefore the sets $Y_i \cap (S^2 \times [\frac{1}{2},1])$ are pairwise disjoint. Ergo the $Y_i$ are pairwise disjoint.
\medskip
We are now ready to define the complex line bundles $\Upsilon(Y_i)$. Recall from Section \ref{Floer Cohomology Section} that there is a holomorphic embedding
\begin{align*}
\iota :M^{\text{inv}}_0 = \text{Sym}^{n_1}(S^2 \backslash \{{\bf w}\}) &\rightarrow \text{Sym}^{n_1}(S^2 \backslash \{\widetilde{\bf w}\}) = M_0 \\
(x_1...x_{n_1}) &\mapsto (x_1^1x_1^2...x_{n_1}^1 x_{n_1}^2).
\end{align*}
\noindent Similarly, we have a holomorphic embedding
\begin{align*}
\iota': (S^2 \backslash \{{\bf w}\}) &\rightarrow \text{Sym}^2(S^2 \backslash \{\widetilde{\bf w}\}) \\
x &\mapsto (x^1x^2)
\end{align*}
which takes a point $x$ on $S^2 \backslash \{{\bf w}\}$ to the unordered pair consisting of its two (not necessarily distinct) lifts on $S^2 \backslash \{\widetilde{{\bf w}}\}$ under the projection map $\pi: (S^2 \backslash \{\widetilde{\bf w}\}) \rightarrow (S^2 \backslash \{{\bf w}\})$. Notice that for each $1 \leq i \leq n_1$, $\iota'(\alpha_i) \subset \alpha_i^1 \times \alpha_i^2$ and $\iota'(\beta_i) \subset \beta_i^1 \times \beta_i^2$. Let $N(S^2 \backslash \{{\bf w}\})$ be the normal bundle to $S^2 \backslash \{{\bf w}\}$ in the second symmetric product $\text{Sym}^2(S^2 \backslash \{\widetilde{\bf w}\})$. This is a complex line bundle over a punctured sphere, hence trivial. For each $1 \leq i \leq n_1$, let $N\alpha_i$ be the real normal bundle to $\alpha_i$ in $\alpha_i^1 \times \alpha_i^2$ and $N\beta_i$ be the real normal bundle to $\beta_i$ in $\beta_i^1 \times \beta_i^2$. An argument similar to that of Lemma \ref{Torus Triviality Lemma} shows that $N\alpha_i$ and $N\beta_i$ are trivial real line bundles for all $i$.
\medskip
Consider the pullback of the normal bundle $N(S^2 \backslash \{{\bf w}\})$ to $(S^2 \backslash \{{\bf w}\}) \times [0,1]$. The complex line bundle over $Y_i$ that interests us is the restriction of this pullback to $Y_i$, which we shall by analogy denote $\Upsilon(Y_i)$. That is, $\Upsilon(Y_i) = (N(S^2 \backslash \{{\bf w}\}) \times [0,1])|_{Y_i}$. This complex line bundle has totally real subbundles $J(N\alpha_i \times \{1\})$ over $\alpha_i \times \{1\}$ and $N\beta_i \times \{0\}$ over $\beta_i \times \{0\}$. Choose preferred real trivializations of these real line bundles, and extend to a complex trivialization of $\Upsilon(Y_i)|_{(\beta_i \times \{0\}) \cup (\alpha_i \times \{1\})}$ by tensoring with $\mathbb C$. For convenience, let this subspace $(\beta_i \times \{0\}) \cup (\alpha_i \times \{1\})$ be $X_i$.
\begin{lemma}
For $1 \leq i \leq n_1$, given any preferred trivialization of $\Upsilon(Y_i)|_{X_i}$ as a complex vector bundle, the relative vector bundle $\Upsilon(Y_i)|_{\text{rel}}$ over $(Y_i,X_i)$ is stably trivial.
\end{lemma}
\begin{proof}
Let $\iota_i$ be the inclusion of $Y_i$ into $(S^2 \backslash \{{\bf w}\})\times[0,1]$, and $p$ be the projection of $(S^2 \backslash \{{\bf w}\})\times [0,1]$ to $S^2 \backslash \{{\bf w}\}$. Then the image of $p \circ \iota_i(Y_i)$ is the periodic domain $P_i$.
\medskip
Consider the commutative diagram below. The top horizontal inclusion of $S^2 \backslash \{{\bf w}\}$ into $\text{Sym}^{n_1}(S^2 \backslash \{{\bf w}\})$ is defined by mapping a point $x$ to $(x x_1...\widehat{x_i}...x_{n_1})$, where again each $x_j$ is the positively oriented point in $\alpha_j \cap \beta_j$. The bottom inclusion of $\text{Sym}^2(S^2 \backslash \{\widetilde{\bf w}\})$ into $\text{Sym}^{2n_1}(S^2 \backslash \{\widetilde{\bf w}\})$ sends an unordered pair $(xy)$ to $(xyx_1^1x_1^2...\widehat{x_i^1}\widehat{x_i^2}...x_{n_1}^1x_{n_1}^2)$.
\[
\xymatrix{
Y_i \ar[r]^-{p \circ \iota_i} \ar[dr] & S^2 \backslash \{{\bf w}\} \ar@{^{(}->}[d]^-{\iota'} \lhook\mkern-7mu\ar[r]& \text{Sym}^{n_1}(S^2 \backslash \{{\bf w}\}) \ar@{^{(}->}[d]^-{\iota} \\
& \text{Sym}^2(S^2 \backslash \{\widetilde{\bf w}\}) \lhook\mkern-7mu\ar[r]& \text{Sym}^{2n_1}(S^2 \backslash \{\widetilde{\bf w}\})
}
\]
Consider the map $\phi:Y_i \rightarrow \text{Sym}^{n_1}(S^2 \backslash \{{\bf w}\})$ given by composition along the top row of the diagram. This is a topological annulus representing the periodic domain $P_i$. Since $P_i$ has Maslov index zero, by the discussion in Section \ref{Heegaard Floer Background Section}, the first Chern class of the pullback of the complex tangent bundle $T\text{Sym}^{n_1}(S^2 \backslash \{{\bf w}\})$ to $Y_i$ relative to the complexification of the pullbacks of the real tangent bundles $J(T(\mathbb T_{\boldsymbol \alpha}))$ to one component of the boundary of $Y_i$ and $T(\mathbb T_{\boldsymbol \beta})$ to the other is zero. However, the pullback $\phi^*(T\text{Sym}^{n_1}(S^2 \backslash \{{\bf w}\}))$ of the tangent bundle of the total symmetric product to $Y_i$ is exactly $(p \circ \iota_i)^*(T(S^2 \backslash \{{\bf w}\}) \oplus \mathbb C^{n_1 - 1})$, where the factors of $\mathbb C$ are the restriction of the tangent bundle of the punctured sphere to the points $x_j$ such that $j \neq i$. Moreover, $(p \circ \iota_i)^*(T(S^2 \backslash \{{\bf w}\}))$ is precisely the restriction of the pullback bundle $p^*(T(S^2 \backslash \{{\bf w}\})) = T((S^2 \backslash \{{\bf w}\})\times [0,1])$ over $(S^2 \backslash \{{\bf w}\}) \times [0,1]$ to the subspace $Y_i$. Therefore $\phi^*(T\text{Sym}^{n_1}(S^2 \backslash \{{\bf w}\})) = T((S^2 \backslash \{{\bf w}\}) \times [0,1])|_{Y_i} \oplus \mathbb C^{n_1 - 1}$.
\medskip
Similarly, the pullback $\phi^*(J(T (\mathbb T_{\boldsymbol \alpha})))$ to the boundary component $\alpha_i \times \{1\} \subset Y_i$ is $(p \circ \iota_i)^*(J(T(\alpha_i)\oplus \mathbb R^{n_1-1}))$, where the factors of $\mathbb R$ are the canonical real subspace of the tangent bundle to the punctured sphere at the points $x_j$ for $j \neq i$. The pullback of this bundle to $\alpha_i \times \{1\} \subset Y_i$ is $J((T(\alpha_i) \times \{1\}) \oplus \mathbb R^{n_1-1})$. A similar argument shows that $\phi^*(T(\mathbb T_{\boldsymbol \beta}))$ is $(T(\beta_i)\times \{0\}) \oplus \mathbb R^{n_1-1}$. Therefore we have seen that the complex vector bundle $T((S^2 \backslash \{{\bf w}\})\times [0,1])|_{Y_i} \oplus \mathbb C^{n_1-1}$ relative to the complexification of its totally real subbundles $(T(\beta_i)\times \{0\})\oplus \mathbb R^{n_1-1}$ over $\beta_i \times \{0\}$ and $J((T(\alpha_i)\times \{1\})\oplus \mathbb R^{n_1-1})$ over $\alpha_i \times \{1\}$ has relative first Chern class zero. Hence the same is true of the complex line bundle $T((S^2 \backslash \{{\bf w}\})\times [0,1])|_{Y_i}$ relative to the complexification of its totally real subbundles $J(T(\alpha_i)\times \{1\})$ and $T(\beta_i)\times\{0\}$. As this is a line bundle, triviality of the first relative Chern class suffices to show stable triviality of the relative vector bundle.
\medskip
Now consider the map $\widetilde{\phi} = \iota \circ \phi$ from $Y_i$ to $\text{Sym}^{2n_1}(S^2 \backslash \{\widetilde{\bf w}\})$ given by any path through the diagram from $Y_i$ to $\text{Sym}^{2n_1}(S^2 \backslash \{\widetilde{\bf w}\})$. This is a topological annulus representing the periodic domain $\pi^{-1}(P_i)$ in $\widetilde{\mathcal D}$, which also has Maslov index zero. Therefore the pullback along $\widetilde{\phi}$ of the complex tangent bundle $T(\text{Sym}^{2n_1}(S^2 \backslash \{\widetilde{\bf w}\}))$ to $Y_i$ relative to complexifications of pullbacks of the totally real subbundles $J(T(\mathbb T_{\widetilde{\boldsymbol \alpha}}))$ and $T(\mathbb T_{\widetilde{\boldsymbol \beta}})$ to $\alpha_i \times \{1\}$ and $\beta_i \times \{0\}$ has trivial relative first Chern class. However, once again the pullback of this relative bundle along the inclusion map $\text{Sym}^{2}(S^2 \backslash \{\widetilde{\bf w}\}) \hookrightarrow \text{Sym}^{2n_1}(S^2 \backslash \{\widetilde{\bf w}\})$ decomposes into a much smaller tangent bundle together with a trivial summand. Observe that $\widetilde{\phi}^*(T(\text{Sym}^{2n_1}(S^2 \backslash \{\widetilde{\bf w}\})))$ is $(\iota' \circ (p \circ \iota_i))^*(T(\text{Sym}^{2}(S^2 \backslash \{\widetilde{\bf w}\})) \oplus \mathbb C^{2n_1 - 2})$. Pulling back along the inclusion map $\iota'$, we see that this bundle breaks up as $T(S^2 \backslash \{{\bf w}\}) \oplus N(S^2 \backslash \{{\bf w}\}) \oplus \mathbb C^{2n_1 - 2}$ over $S^2 \backslash \{{\bf w}\}$, and that therefore its ultimate pullback to $Y_i$ is $T((S^2 \backslash \{{\bf w}\}) \times [0,1]) \oplus \Upsilon(Y_i) \oplus \mathbb C^{2n_1 -2}$.
\medskip
Similarly, the pullback along $\widetilde{\phi}$ of $J(T(\mathbb T_{\widetilde{\boldsymbol \alpha}}))$ to $\alpha_i \times \{1\}$ is precisely $J(T{\alpha_i} \times \{1\}) \oplus J(N{\alpha_i} \times \{1\}) \oplus J(\mathbb R^{2n_1-2})$. Finally, the pullback along $\widetilde{\phi}$ of $T(\mathbb T_{\widetilde{\boldsymbol \beta}})$ to $\beta_i \times \{0\}$ is $(T{\beta_i} \times \{0\}) \oplus (N{\beta_i} \times \{0\}) \oplus \mathbb R^{2n_1-2}$. Dropping the trivial summands, we conclude that the bundle $T((S^2 \backslash \{{\bf w}\}) \times [0,1]) \oplus \Upsilon(Y_i)$ relative to complexifications of the totally real subbundle $(T{\beta_i} \times \{0\}) \oplus (N{\beta_i} \times \{0\})$ over $\beta_i \times \{0\}$ and the totally real subbundle $J(T{\alpha_i} \times \{1\}) \oplus J(N{\alpha_i} \times \{1\})$ over $\alpha_i \times \{1\}$ has relative first Chern class zero. This, combined with our previous conclusions concerning relative triviality of $T((S^2 \backslash \{{\bf w}\}) \times [0,1])$ with respect to its subbundles $T\beta_i \times \{0\}$ over $\beta_i \times \{0\}$ and $J(T\alpha_i \times \{1\})$ over $\alpha_i \times \{1\}$, implies that $\Upsilon(Y_i)$ has relative first Chern class zero with respect to complexifications of its subbundles $N\beta_i \times \{0\}$ and $J(N\alpha_i \times \{1\})$, as promised.
\end{proof}
We are now ready to construct generators for $H_k(M_0^{\text{inv}} \times [0,1], X)$, and use them to prove that the relative Chern classes of $[\Upsilon(M_0^{\text{inv}})]$ are identically zero. Recall that for $k > 1$ there are short exact sequences on homology
\[
\xymatrix{
0 \ar[r] & H_{k+1}(M_0^{\text{inv}} \times [0,1], X) \ar[r] & H_k(\mathbb T_{\boldsymbol \beta}) \oplus H_k(\mathbb T_{\boldsymbol \alpha}) \ar[r]^-{\iota_*} & H_k(M_0^{\text{inv}}) \ar[r] &0.
}
\]
As in Section \ref{Geometry Section}, for $k>1$, the $(k+1)$st homology group $H_{k+1}(M_0^{\text{inv}} \times [0,1], X)$ is the kernel of the map $\iota_*$. Let us take a closer look at generators for this group. We have seen that the kernel of $\iota_*$ is $\mathbb Z \langle \textstyle{\bigwedge_{j=1}^{k}} [\beta_{i_j}] \oplus -\textstyle{\bigwedge_{j=1}^k} [\alpha_{i_j}] : 1 \leq i_1 < \cdots <i_k \leq n_1 \rangle$. Therefore for each ${\bf I} = (i_1,...,i_k)$ such that $1 \leq i_1 < \cdots < i_k \leq n_1$, there is a $(k+1)$-chain $W_{\bf I}$ in $M_0^{\text{inv}} \times [0,1]$ whose boundary is $\prod_{j=1}^{k} \beta_{i_j} - \prod_{j=1}^k \alpha_{i_j}$. We will describe this chain in terms of the two-chains $Y_i$.
\medskip
Construct the $(k+1)$-chain $W_{\bf I}$ as follows. Let $W_{\bf I}$ be topologically $(S^1)^k \times [0,1]$, and insist that the intersection of $W_{\bf I}$ with $M_0^{\text{inv}} \times \{t\}$ is the product $\prod_{j=1}^k \left(Y_{i_j} \cap ((S^2 \backslash \{{\bf w}\}) \times \{t\})\right) \times \prod_{i \notin {\bf I}}\{x_i\}$. Since the intersection of each $Y_{i_j}$ with $(S^2 \backslash \{{\bf w}\}) \times \{t\}$ is a circle for each $t$, this says that the intersection of $W_{\bf I}$ with $M_0^{\text{inv}}\times \{t\}$ is a $k$-torus for all $t$.
\medskip
Notice that for all $t$, $W_{\bf I} \cap (M_0^{\text{inv}} \times \{t\})$ is a subset of the product $\prod_{i=1}^{n_1} \left(Y_i \cap ((S^2 \backslash \{{\bf w}\})\times \{t\})\right)$. (Recall here that the line segments $\{x_i\} \times [0,1]$ are subsets of $Y_i$ for each $i$.) Since the $Y_i$ are pairwise disjoint, each $W_{\bf I}\cap(M_0^{\text{inv}}\times \{t\})$ is a submanifold of $M_0^{\text{inv}} = \text{Sym}^{n_1}(S^2 \backslash \{{\bf w}\})$ and is disjoint from the fat diagonal. Furthermore, since $W_{\bf I}$ is homeomorphic to $(S^1)^k \times [0,1]$ and $\partial W_{\bf I}$ is
$\prod_{j=1}^{k} \beta_{i_j} - \prod_{j=1}^k \alpha_{i_j}$, we see that the collection $\{W_{\bf I}: 1 \leq i_1 < \cdots < i_k \leq n_1\}$ generates $H_k(M_0^{\text{inv}}\times [0,1], X)$.
\medskip
Let us consider the restriction of $\Upsilon(M_0^{\text{inv}})$ to $W_{\bf I}$ for some particular ${\bf I}$. We pick preferred trivializations of $N(L_0) \times \{0\} = N(\mathbb T_{\boldsymbol \beta}) \times \{0\}$ and $J(N(L_1) \times \{1\}) = J(N(\mathbb T_{\boldsymbol \alpha}) \times \{1\})$ which are products of preferred trivializations of the real subbundles $N(\beta_i) \times \{0\}$ and $J(N(\alpha_i) \times \{1\})$ in $\Upsilon(Y_i)$.
\medskip
Because $W_{\bf I} \cap (M_0^{\text{inv}} \times \{t\})$ lies entirely off the fat diagonal for each $t$, the restriction of $\Upsilon(M_0^{\text{inv}})$ to each $W_{\bf I}$ also decomposes as a product bundle. In other words, regard $W_{\bf I}$ as a subset of the abstract product $\prod_{i=1}^{n_1} Y_i$ (which is \textit{not} a submanifold of $M_0^{\text{inv}} \times [0,1]$). Then $\Upsilon(M_0^{\text{inv}})|_{W_{\bf I}}$ is the restriction of the product bundle $(\prod\Upsilon(Y_i))|_{W_{\bf I}}$. Therefore, since each $\Upsilon(Y_i)$ admits a trivialization with respect to a choice of trivialization of the bundle restricted to $\alpha_i$ and $\beta_i$, we conclude that $\Upsilon(M_0^{\text{inv}})|_{W_{\bf I}}$ is trivializable with respect to preferred trivializations of the bundle over $\mathbb T_{\boldsymbol \beta}\times \{0\}$ and $\mathbb T_{\boldsymbol \alpha} \times \{1\}$.
\begin{remark}
We could as easily have shown that $(\text{Sym}^{2n_1}(S^2 \backslash \{\widetilde{\bf w}\}), \mathbb T_{\widetilde{\boldsymbol \beta}}, \mathbb T_{\widetilde{\boldsymbol \alpha}})$ has a stable normal trivialization. In the case studied here this would produce a spectral sequence from $\widehat{\mathit{HF}}(S^3) \otimes W^{\otimes 2n_1}$ to $\widehat{\mathit{HF}}(S^3) \otimes W^{\otimes n_1}$, which is not especially interesting.
\end{remark}
\section{Examples}
In this section we produce some examples of the behavior of the spectral sequences of Theorems \ref{Link Floer Homology Spectral Sequence} and \ref{Knot Floer Homology Spectral Sequence}. We present the cases of the unknot and the trefoil as doubly-periodic knots. To simplify matters, we will compute all spectral sequences using appropriate chain complexes tensored with $\mathbb Z_2((\theta))$, instead of tensoring with $\mathbb Z_2[[\theta]]$ and later further tensoring the $E^{\infty}$ page with $\theta^{-1}$. Then the $E^1$ page of each spectral sequence is a free module with generators the link Floer or knot Floer homology of our doubly periodic knot, and the $E^{\infty}$ page is a free module over $\mathbb Z_2((\theta))$ with generators the link or knot Floer homology of the quotient. (The only information we lose by this change is that the $E^{\infty}$ page is not the Borel Heegaard Floer link or knot homology, but rather the Borel homology tensored with $\theta^{-1}$.)
\subsection{The case of the Unknot}
In the simplest possible case, let $\widetilde{K}$ be an unknot, and $K$ its quotient knot, a second unknot. Consider the diagram $\mathcal D$ for $K \cup U$ on the sphere $S^2 = \mathbb C \cup \{\infty\}$ in which $K$ is the unit circle with basepoints $w_0, z_0$ on $U$ and basepoints $w_1, z_1$ on $K$ such that $w_0$ lies at $\infty$, $z_0$ lies at $0$, $w_1$ lies at $i$, and $z_1$ lies at $-i$. Supply a single $\alpha_1$ and $\beta_1$ as in Figure \ref{Unknot Heegaard Diagrams Figure}, oriented coherently with a clockwise orientation of $K$.
\begin{figure}
\caption{An equivariant Heegaard diagram for the unknot together with an axis (i.e. a Hopf link), and its quotient Heegaard diagram (another Hopf link).}
\label{Unknot Heegaard Diagrams Figure}
\subfloat{\begin{tikzpicture}[scale=.9]
\centering
\tikzstyle{every node}=[font=\small];
\node(1) at (0,1.8){$w_1^1$};
\node(2) at (0,-1.8){$w_1^2$};
\node(3) at (1.8,0){$z_1^1$};
\node(4) at (-1.8,0){$z_1^2$};
\node(5) at (0,0){$z_0$};
\node(6) at (-2.5,2.5){$w_0$};
\tikzstyle{every node}=[font=\tiny];
\node(7) at (0,2.6){$a^1$};
\node(8) at (.05,.65){$b^1$};
\node(9) at (0,-2.6){$a^2$};
\node(10) at (.05,-.65){$b^2$};
\node(11) at (2.6,0){$e^1$};
\node(12) at (.65,0){$c^1$};
\node(13) at (-2.6,0){$e^2$};
\node(14) at (-.65,0){$c^2$};
\tikzset{every path/.style={line width = 2 pt}};
\draw [rotate=-45][red](0,1.4) ellipse (49pt and 20pt);
\draw[rotate=-45][magenta](0,-1.4) ellipse (49pt and 20pt);
\draw[rotate=45][dashed][blue](0,1.4) ellipse (49pt and 20pt);
\draw[rotate=45][dashed][cyan](0,-1.4) ellipse (49pt and 20pt);
\end{tikzpicture}}
\hspace{1 cm}
\subfloat{\begin{tikzpicture}[scale = .9]
\centering
\tikzstyle{every node}=[font=\small];
\node(1) at (0,0){$z_0$};
\node(2) at (-2.5,2.5){$w_0$};
\node(3) at (0,1.75){$w_1$};
\node(4) at (0,-1.75){$z_1$};
\tikzstyle{every node}=[font=\tiny];
\node(5) at (0,2.5){$a$};
\node(6) at (0,1){$b$};
\node(7) at (0,-1){$c$};
\node(8) at (0,-2.5){$e$};
\tikzset{every path/.style={line width = 2 pt}};
\draw[blue][dashed](0,2.25)..controls (-3, 2.25) and (-3,-2.25)..(0,-2.25);
\draw[blue][dashed](0,2.25)..controls(.7,2.25) and (.7,1.25) ..(0,1.25);
\draw[blue][dashed](0,-2.25)..controls(.7,-2.25) and (.7,-1.25) ..(0,-1.25);
\draw[blue][dashed](0,1.25)..controls (-1.7,1.25) and (-1.7,-1.25)..(0,-1.25);
\draw[red](0,2.25)..controls (3, 2.25) and (3,-2.25)..(0,-2.25);
\draw[red](0,2.25)..controls(-.7,2.25) and (-.7,1.25) ..(0,1.25);
\draw[red](0,-2.25)..controls(-.7,-2.25) and (-.7,-1.25) ..(0,-1.25);
\draw[red](0,1.25)..controls (1.7,1.25) and (1.7,-1.25)..(0,-1.25);
\end{tikzpicture}}
\end{figure}
\medskip Label the intersection points $a, b, c, e$ vertically down the diagram as in the figure. (Since we plan to discuss the differentials $d_i$ in the spectral sequence, we will not also use $d$ to label any intersection points.) There are no differentials that count for $\widehat{\mathit{HFL}}(\mathcal D)$ -- which is exactly the link Floer homology of the Hopf link with positive linking number -- and three differentials that count for $\widehat{\mathit{HFK}}(\mathcal D) = \widehat{\mathit{HFK}}(S^3, K) \otimes W$. See the table of Figure \ref{Unknot Complexes Figure} for the Alexander gradings of these entries.
\begin{figure}
\caption{Alexander gradings and differentials for $\widehat{\mathit{CFL}}(\mathcal D)$ (on the left) and $\widehat{\mathit{CFK}}(\mathcal D)$ (on the right).}
\label{Unknot Complexes Figure}
\subfloat{
\centering
\begin{tikzpicture}
\path (-2,2)edge (1,2);
\path (-2,-1)edge (-2,2);
\path (-2,-1) edge (1,-1);
\path (1,-1) edge (1,2);
\path (-2, 1) edge (1,1);
\path (-2,0) edge (1,0);
\path (-1,2) edge (-1,-1);
\path (0,2) edge (0,-1);
\path (-2,2) edge (-1,1);
\node (b) at (-.5,.5){$b$};
\node (a) at (.5,.5){$a$};
\node (c) at (-.5,-.5){$c$};
\node (e) at ( .5,-.5){$e$};
\node at (-.5,1.5){$-\frac{1}{2}$};
\node at (.5,1.5){$\frac{1}{2}$};
\node at (-1.5,.5){$\frac{1}{2}$};
\node at (-1.5,-.5){$-\frac{1}{2}$};
\node at (-1.7,1.3){$A_2$};
\node at (-1.3,1.7){$A_1$};
\end{tikzpicture}}
\hspace{1 cm}
\subfloat{
\begin{tikzpicture}
\path (-2,2)edge (1,2);
\path (-2,-1)edge (-2,2);
\path (-2,-1) edge (1,-1);
\path (1,-1) edge (1,2);
\path (-2, 1) edge (1,1);
\path (-1,0) edge (1,0);
\path (-1,2) edge (-1,-1);
\path (0,2) edge (0,-1);
\node (b) at (-.5,.5){$b$};
\node (a) at (.5,.5){$a$};
\node (c) at (-.5,-.5){$c$};
\node (d) at ( .5,-.5){$e$};
\path[->,dashed](b)edge(c);
\path[->,dashed][bend left](a)edge(d);
\path[->,dashed][bend right](a)edge(d);
\node at (-.5,1.5){$-1$};
\node at (.5,1.5){$0$};
\node at (-1.5,1.5){$A_1$};
\end{tikzpicture}}
\end{figure}
\medskip
Now lift to a diagram $\widetilde{\mathcal D}$ for the same Hopf link, which has basepoints $w_0, z_0$ on the axis $U$ and $w_1^1, w_1^2, z_1^1, z_1^2$ on the lifted unknot $\widetilde{K}$. These basepoints lie on $S^2 = \mathbb C \cup \{\infty\}$ as follows: $w_0$ and $z_0$ lie at $\infty$ and $0$ as previously, and $w_1^1, w_1^2, z_1^1, z_1^2$ lie at $i, -i, 1, -1$ respectively. There are two curves $\alpha_1^1$ and $\alpha_1^2$ encircling the pairs $w_1^1, z_1^1$ and $w_1^2, z_1^2$, and two curves $\beta_1^1$ and $\beta_1^2$ encircling pairs $z_1^2, w_1^1$ and $z_1^1, w_1^2$. The intersection points $a, b, c, e$ lift to eight points $a^1,a^2,b^1,b^2,c^1,c^2,e^1,e^2$ on the diagram in $\cup (\alpha_1^i \cap \beta_1^j)$. (The numbering of each pair is arbitrarily determined by insisting that $a^i$ lie on $\alpha_1^i$, and so on.) The complex $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ has eight generators, whose Alexander gradings are laid out in the left table of Figure \ref{Lifted Unknot Complexes Figure}; there are no differentials that count for the theory $\widetilde{\mathit{HFL}}(\widetilde{\mathcal D}) = \widehat{\mathit{HFL}}(S^3, \widetilde{K} \cup U) \otimes V_1$. Allowing differentials that pass over the basepoint $z_0$, we obtain the complex of the right table of Figure \ref{Lifted Unknot Complexes Figure} which computes $\widetilde{\mathit{HFK}}(\widetilde{\mathcal D}) = \widehat{\mathit{HFK}}(S^3, \widetilde{K}) \otimes V_1 \otimes W$.
\begin{figure}
\caption{Alexander gradings and differentials of $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ (left) and $\widehat{\mathit{CFK}}(\widetilde{\mathcal D})$ (right).}
\label{Lifted Unknot Complexes Figure}
\subfloat{
\centering
\begin{tikzpicture}
\path(-3,2)edge(2,2);
\path(-3,2)edge(-3,-1);
\path(-3,-1)edge(2,-1);
\path(2,2)edge(2,-1);
\path(-2,2)edge(-2,-1);
\path(-1,2)edge(-1,-1);
\path(1,2)edge(1,-1);
\path(-3,1)edge(2,1);
\path(-3,0)edge(2,0);
\path(-3,2)edge(-2,1);
\node at (-2.7,1.3){$A_2$};
\node at (-2.3,1.7){$A_1$};
\node at (-1.5,1.5){$-\frac{3}{2}$};
\node at (0,1.5){$-\frac{1}{2}$};
\node at (1.5,1.5){$\frac{1}{2}$};
\node at (-2.5, .5){$\frac{1}{2}$};
\node at (-2.5,-.5){$-\frac{1}{2}$};
\node (1) at (-1.5,.5){$b^1b^2$};
\node (2) at (-1.5,-.5){$c^1c^2$};
\node (3) at (1.5,.5){$a^1a^2$};
\node (4) at (1.5,-.5){$e^1e^2$};
\node (5) at (-.5,.5){$b^1a^2$};
\node (6) at (.5,.5){$a^1b^2$};
\node (7) at (-.5,-.5){$c^1e^2$};
\node (8) at (.5,-.5){$e^1c^2$};
\end{tikzpicture}}
\hspace{1 cm}
\subfloat{
\centering
\begin{tikzpicture}
\path(-3,2)edge(2,2);
\path(-3,2)edge(-3,-1);
\path(-3,-1)edge(2,-1);
\path(2,2)edge(2,-1);
\path(-2,2)edge(-2,-1);
\path(-1,2)edge(-1,-1);
\path(1,2)edge(1,-1);
\path(-3,1)edge(2,1);
\path(-2,0)edge(2,0);
\node at (-2.5,1.5){$A_1$};
\node at (-1.5,1.5){$-2$};
\node at (0,1.5){$-1$};
\node at (1.5,1.5){$0$};
\node (1) at (-1.5,.5){$b^1b^2$};
\node (2) at (-1.5,-.5){$c^1c^2$};
\node (3) at (1.5,.5){$a^1a^2$};
\node (4) at (1.5,-.5){$e^1e^2$};
\node (5) at (-.5,.5){$b^1a^2$};
\node (6) at (.5,.5){$a^1b^2$};
\node (7) at (-.5,-.5){$c^1e^2$};
\node (8) at (.5,-.5){$e^1c^2$};
\path[->, dashed](1)edge(2);
\path[->,dashed][bend left](3)edge(4);
\path[->,dashed][bend right](3)edge(4);
\path[->,dashed](5)edge(7);
\path[->,dashed](5)edge(8);
\path[->,dashed](6)edge(7);
\path[->,dashed](6)edge(8);
\end{tikzpicture}}
\end{figure}
\medskip
First, consider the spectral sequence for link Floer homology of the double complex $(\widehat{\mathit{CFL}}(\widetilde{\mathcal D}) \otimes \mathbb Z_2((\theta)), \partial + (1 + \tau^{\#})\theta)$. Since $\partial = 0$ on this complex, the only nontrivial differential occurs on the $E^1$ page and is exactly $d_1 = 1 + \tau^{\#}$. Therefore the $E^2$ page of the spectral sequence is $\mathbb Z_2((\theta))\langle a^1a^2, b^1b^2, c^1c^2, e^1e^2 \rangle$, which is isomorphic to $\widehat{\mathit{HFL}}(S^3, K \cup U) \otimes \mathbb Z_2((\theta))$, as expected.
\medskip
Next, consider the spectral sequence of the double complex $(\widehat{\mathit{CFK}}(\widetilde{\mathcal D}) \otimes \mathbb Z_2((\theta)), \partial_{U} + (1 + \tau^{\#})\theta)$. We have the complex of Figure \ref{Lifted Unknot Complexes Figure}; the $E^1$ page of the spectral sequence is equal to $\mathbb Z_2\langle [a^1a^2], [e^1e^2], [a^1b^2+b^1a^2], [c^1e^2] \rangle \otimes \mathbb Z_2((\theta))$. Bearing in mind that $[c^1e^2] = [c^2e^1]$, we see that $\tau^*$ is the identity on each of these four elements, and therefore the $E^2$ page of the spectral sequence is the same as the $E^1$ page. Since the differential $d_2$ must raise the Maslov grading by two, the only possible nontrivial differential on the $E^2$ page is $d_2([c^1e^2]\theta^n)$. Let us compute this differential. On the chain level, we have $(1+ \tau^{\#})(c^1e^2) = (c^1e^2 + c^2e^1)$. We observe that $c^1e^2 + c^2e^1 = \partial(a^1b^2)$, so
\begin{align*}
d_2([c^1e^2]\theta^n) &= [(1+\tau^{\#})(a^1b^2)]\theta^{n+2} \\
&=[a^1b^2 + a^2b^1]\theta^{n+2}.
\end{align*}
Therefore the $E^3$ page of this spectral sequence is exactly $\mathbb Z_2((\theta))\langle[a^1a^2], [e^1e^2]\rangle$, which is isomorphic to $\left(\widehat{\mathit{HFK}}(S^3,K)\otimes W\right)\otimes \mathbb Z_2((\theta))$ as promised, and unchanged thereafter.
\FloatBarrier
\subsection{The case of the trefoil}
Let us now compute some of the spectral sequence for the trefoil as a doubly-periodic knot with quotient the unknot, using the Heegaard diagrams $\mathcal D$ and $\widetilde{\mathcal D}$ of Figure \ref{Trefoil Heegaard Diagrams Figure}. Recall that $\mathcal D$ is a Heegaard diagram for a link $L$ in $S^3$ consisting of two unknots with linking number $\lambda = 3$ and $\widetilde{\mathcal D}$ is a Heegaard diagram for a link $\widetilde{L}$ in $S^3$ consisting of the left-handed trefoil and the unknotted axis also with linking number $\lambda=3$. We label the twelve intersection points of $\alpha_1$ and $\beta_1$ in $\mathcal D$ as shown in Figure \ref{Trefoil Quotient Generators}, and lift to twenty-four intersection points in $\widetilde{\mathcal D}$ in Figure \ref{Trefoil Generators}. The Alexander gradings and differentials of $\widehat{\mathit{CFL}}(\mathcal D)$ and $\widehat{\mathit{CFK}}(\mathcal D)$ are laid out in the tables of Figure \ref{Trefoil Quotient Complexes}.
\begin{figure}
\centering
\caption{Intersection points in $\mathcal D$.}
\label{Trefoil Quotient Generators}
\begin{tikzpicture}
\tikzstyle{every node}=[font=\small]
\node(2) at (-4,1){$z_1$};
\node(3) at (-1,0){$w_1$};
\tikzstyle{every node}=[font=\tiny]
\node(4) at (-4.5,1.8){$a$};
\node(5) at (-3.8,1.9){$b$};
\node(6) at (-.5,-.8){$h$};
\node(7) at (-1.15,-1){$i$};
\node(8) at (-1.5,-1){$j$};
\node(9) at (-1.5,1.4){$g$};
\node(10) at (-2.1,1.65){$f$};
\node(11) at (-2.1,-.9){$k$};
\node(12) at (-2.65,-.75){$\ell$};
\node(13) at (-2.6,1.8){$e$};
\node(14) at (-3.1,1.9){$c$};
\node(15) at (-3.2,-.55){$m$};
\def \alphacurve {[rotate=-18.5](-2.53,-.32) ellipse (65pt and 28pt)};
\draw [blue][dashed][line width = 2 pt] \alphacurve;
\clip \alphacurve;
\tikzset{every path/.style={line width = 2 pt}};
\draw[red](-4.3,1) ..controls (-4.5,6.5) and (3.4,6.5)..(3.3,.3);
\draw[red](3.3,.3)..controls (3.4, -4.9) and (-3.4,-5)..(-3.3,.7);
\draw[red](-3.3,.7)..controls (-3.4, 5) and (2.4,5)..(2.3, .5);
\draw[red](2.3, .5)..controls (2.4, -3.5) and (-2.4, -3.5)..(-2.3, .4);
\draw[red](-2.3,.4)..controls (-2.4, 3.5) and (1.4, 3.4)..(1.3,.7);
\draw[red](1.3,.7)..controls (1.4,-2.15) and (-1.4, -2.15)..(-1.3,0);
\draw[red](-1.3,0)..controls (-1.4, .5) and (-.7,.5)..(-.7,0);
\draw[red](-.7,0)..controls (-.8,-1.3) and (.8, -1.2)..(.7,.5);
\draw[red](.7,.5)..controls (.8,2.5) and (-1.8, 2.5)..(-1.7,.4);
\draw[red](-1.7,.4)..controls(-1.8,-2.65) and (1.8, -2.65) ..(1.7,.6);
\draw[red](1.7,.6)..controls(1.8,4) and(-2.8,4) ..(-2.7,.5);
\draw[red](-2.7,.5)..controls(-2.8,-4.1) and (2.8,-4.1) ..(2.7,.5);
\draw[red](2.7,.5)..controls (2.85,5.55) and (-3.8,5.75)..(-3.7,.9);
\draw[red](-3.7,.9)..controls (-3.8,.6) and (-4.4,.6) .. (-4.3,1);
\end{tikzpicture}
\end{figure}
\begin{figure}
\caption{Intersection points of $\alpha$ and $\beta$ curves in $\widetilde{\mathcal D}$.}
\label{Trefoil Generators}
\centering
\subfloat{
\begin{tikzpicture}
\tikzstyle{every node}=[font=\small]
\node(1) at (0,.7){$z_0$};
\node(2) at (-4,1){$z_1^1$};
\node(3) at (-1,0){$w_1^1$};
\tikzstyle{every node}=[font=\tiny]
\node(4) at (-4.4,1.7){$a^1$};
\node(5) at (-3.7,1.8){$b^1$};
\node(6) at (-.45,-.7){$h^1$};
\node(7) at (-1.05,-.85){$i^1$};
\node(8) at (-1.5,-.85){$j^1$};
\node(9) at (-1.4,1.3){$g^1$};
\node(10) at (-2,1.55){$f^1$};
\node(11) at (-2.1,-.75){$k^1$};
\node(12) at (-2.55,-.65){$\ell^1$};
\node(13) at (-2.5,1.7){$e^1$};
\node(14) at (-3,1.8){$c^1$};
\node(15) at (-3.1,-.45){$m^1$};
\def \betacurve1{[rotate=-18.5] (-2.53,-.32) ellipse (61pt and 24pt)};
\clip \betacurve1;
\tikzset{every path/.style={line width = 2 pt}}
\draw [rotate=-18.5][blue][dashed](-2.53,-.32) ellipse (60pt and 23pt);
\draw [rotate=-18.5][cyan][dashed] (2.21,1.27) ellipse (60pt and 23pt);
\draw[red](-4.3,1)..controls (-4.4,6.65) and (3.4,6.4)..(3.3,.5);
\draw[red](3.3,.5)..controls (3.3,-4.2) and (-2.3,-3.9)..(-2.3,.2);
\draw[red](-2.3,.2)..controls (-2.4,3.6) and (1.4,3.6)..(1.3,1);
\draw[red](1.3,1)..controls (1.4,.5) and (.7,.5) ..(.7,1);
\draw[red](.7,1)..controls (.8,2.8) and (-1.7,2.5)..(-1.7,.5);
\draw[red](-1.7,.5)..controls (-1.8,-3.2) and (2.7, -3.2)..(2.7,.4);
\draw[red](2.7,.4)..controls (2.7, 5.6) and (-3.85, 5.8) ..(-3.7,.9);
\draw[red](-3.7,.9)..controls (-3.7,.5) and (-4.3,.5) ..(-4.3,1);
\draw[magenta](4.3,0)..controls (4.4,-5.65) and (-3.4,-5.4)..(-3.3,.65);
\draw[magenta](-3.3,.65)..controls (-3.3,4.9) and (2.45,5.25) ..(2.3,.3);
\draw[magenta](2.3,.3)..controls (2.2,-2.45) and (-1.4,-2.55)..(-1.3,0);
\draw[magenta](-1.3,0)..controls (-1.3,.5) and (-.7,.5)..(-.7,0);
\draw[magenta](-.7,0)..controls (-.8,-1.8) and (1.7,-1.5)..(1.7,.6);
\draw[magenta](1.7,.6)..controls (1.8,4.2) and (-2.7,4.2) ..(-2.7,.6);
\draw[magenta](-2.7,.6)..controls (-2.7,-4.6) and (3.7,-4.85)..(3.7,.2);
\draw[magenta](3.7,.2)..controls (3.7,.5) and (4.3,.5)..(4.3,0);
\end{tikzpicture}}
\subfloat{
\begin{tikzpicture}
\tikzstyle{every node}=[font=\small]
\node(4) at (1,1){$w_1^2$};
\node(5) at (4,0){$z_1^2$};
\tikzstyle{every node}=[font=\tiny]
\node(6) at (.6,1.7){$h^2$};
\node(7) at (1.2,1.8){$i^2$};
\node(8) at (1.7,1.8){$j^2$};
\node(9) at (2.3,1.7){$k^2$};
\node(10) at (2.8,1.55){$\ell^2$};
\node(11) at (3.5,1.3){$m^2$};
\node(12) at (4.5,-.65){$a^2$};
\node(13) at (3.8,-.8){$b^2$};
\node(14) at (3.15,-.8){$c^2$};
\node(15) at (2.65,-.7){$e^2$};
\node(16) at (2.1,-.6){$f^2$};
\node(17) at (1.7,-.4){$g^2$};
\def \betacurve2{[rotate=-18.5] (2.21,1.27) ellipse (61pt and 24pt)};
\clip \betacurve2;
\tikzset{every path/.style={line width = 2 pt}}
\draw [rotate=-18.5][blue][dashed](-2.53,-.32) ellipse (60pt and 23pt);
\draw [rotate=-18.5][cyan][dashed] (2.21,1.27) ellipse (60pt and 23pt);
\draw[red](-4.3,1)..controls (-4.4,6.65) and (3.4,6.4)..(3.3,.5);
\draw[red](3.3,.5)..controls (3.3,-4.2) and (-2.3,-3.9)..(-2.3,.2);
\draw[red](-2.3,.2)..controls (-2.4,3.6) and (1.4,3.6)..(1.3,1);
\draw[red](1.3,1)..controls (1.4,.5) and (.7,.5) ..(.7,1);
\draw[red](.7,1)..controls (.8,2.8) and (-1.7,2.5)..(-1.7,.5);
\draw[red](-1.7,.5)..controls (-1.8,-3.2) and (2.7, -3.2)..(2.7,.4);
\draw[red](2.7,.4)..controls (2.7, 5.6) and (-3.85, 5.8) ..(-3.7,.9);
\draw[red](-3.7,.9)..controls (-3.7,.5) and (-4.3,.5) ..(-4.3,1);
\draw[magenta](4.3,0)..controls (4.4,-5.65) and (-3.4,-5.4)..(-3.3,.65);
\draw[magenta](-3.3,.65)..controls (-3.3,4.9) and (2.45,5.25) ..(2.3,.3);
\draw[magenta](2.3,.3)..controls (2.2,-2.45) and (-1.4,-2.55)..(-1.3,0);
\draw[magenta](-1.3,0)..controls (-1.3,.5) and (-.7,.5)..(-.7,0);
\draw[magenta](-.7,0)..controls (-.8,-1.8) and (1.7,-1.5)..(1.7,.6);
\draw[magenta](1.7,.6)..controls (1.8,4.2) and (-2.7,4.2) ..(-2.7,.6);
\draw[magenta](-2.7,.6)..controls (-2.7,-4.6) and (3.7,-4.85)..(3.7,.2);
\draw[magenta](3.7,.2)..controls (3.7,.5) and (4.3,.5)..(4.3,0);
\end{tikzpicture}}
\end{figure}
\begin{figure}
\caption{Alexander gradings and differentials of $\widehat{\mathit{CFL}}(\mathcal D)$ (left) and $\widehat{\mathit{CFK}}(\mathcal D)$ (right).}
\label{Trefoil Quotient Complexes}
\subfloat{\centering
\begin{tikzpicture}
\draw(-3,3)edge(4,3);
\draw(-3,3)edge(-3,-2);
\draw(-3,-2)edge(4,-2);
\draw(4,-2)edge(4,3);
\draw(-3,2)edge(4,2);
\draw(-3,1)edge(4,1);
\draw(-3,0)edge(4,0);
\draw(-3,-1)edge(4,-1);
\draw(-2,3) edge (-2,-2);
\draw(-1,3)edge(-1,-2);
\draw(1,3)edge (1,-2);
\draw(3,3)edge(3,-2);
\draw(-3,3)edge(-2,2);
\node at (-2.7,2.3){$A_2$};
\node at (-2.3,2.7){$A_1$};
\node at (-1.5,2.5){$-\frac{3}{2}$};
\node at (0, 2.5){$-\frac{1}{2}$};
\node at (2, 2.5){$\frac{1}{2}$};
\node at (3.5,2.5){$\frac{3}{2}$};
\node at (-2.5,1.5){$\frac{3}{2}$};
\node at (-2.5,.5){$\frac{1}{2}$};
\node at (-2.5,-.5){$-\frac{1}{2}$};
\node at (-2.5,-1.5){$-\frac{3}{2}$};
\node(1) at (3.5,-.5){$m$};
\node(2) at (3.5,-1.5){$a$};
\node(3) at (2,.5){$k$};
\node(4) at (1.5,-.5){$c$};
\node(5) at (2.5,-.5){$\ell$};
\node(6) at (2,-1.5){$b$};
\node(7) at (0,1.5){$i$};
\node(8) at (-.5,.5){$f$};
\node(9) at (.5,.5){$j$};
\node(10) at (0,-.5){$e$};
\node(11) at (-1.5,1.5){$h$};
\node(12) at (-1.5,.5){$g$};
\end{tikzpicture}}
\hspace{1 cm}
\subfloat{\centering
\begin{tikzpicture}
\draw(-3,3)edge(4,3);
\draw(-3,3)edge(-3,-2);
\draw(-3,-2)edge(4,-2);
\draw(4,-2)edge(4,3);
\draw(-3,2)edge(4,2);
\draw(-2,1)edge(4,1);
\draw(-2,0)edge(4,0);
\draw(-2,-1)edge(4,-1);
\draw(-2,3) edge (-2,-2);
\draw(-1,3)edge(-1,-2);
\draw(1,3)edge (1,-2);
\draw(3,3)edge(3,-2);
\node at (-2.5,2.5){$A_1$};
\node at (-1.5,2.5){$-3$};
\node at (0, 2.5){$-2$};
\node at (2, 2.5){$-1$};
\node at (3.5,2.5){$0$};
\node(1) at (3.5,-.5){$m$};
\node(2) at (3.5,-1.5){$a$};
\node(3) at (2,.5){$k$};
\node(4) at (1.5,-.5){$c$};
\node(5) at (2.5,-.5){$\ell$};
\node(6) at (2,-1.5){$b$};
\node(7) at (0,1.5){$i$};
\node(8) at (-.5,.5){$f$};
\node(9) at (.5,.5){$j$};
\node(10) at (0,-.5){$e$};
\node(11) at (-1.5,1.5){$h$};
\node(12) at (-1.5,.5){$g$};
\path[->,dashed][bend left](1)edge(2);
\path[->,dashed][bend right](1)edge(2);
\path[->,dashed](3)edge(4);
\path[->,dashed](3)edge(5);
\path[->,dashed](4)edge(6);
\path[->,dashed](5)edge(6);
\path[->,dashed](7)edge(8);
\path[->,dashed](7)edge(9);
\path[->,dashed](8)edge(10);
\path[->,dashed](9)edge(10);
\path[->,dashed](11)edge(12);
\end{tikzpicture}}
\end{figure}
There are no differentials on $\widehat{\mathit{CFL}}(\mathcal D)$, so $\widetilde{\mathit{HFL}}(\mathcal D) = \widehat{\mathit{HFL}}(S^3,L)$ has the twelve generators and gradings of Figure \ref{Trefoil Quotient Complexes}. From the remainder of that diagram, we observe that the group $\widetilde{\mathit{HFK}}(\mathcal D) = \widehat{\mathit{HFK}}(S^3,U) \otimes W$ is $\mathbb Z_2 \langle [a],[m] \rangle$.
\medskip
Now consider the seventy-two generators of $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$, whose Alexander $A_1$ and $A_2$ gradings are laid out in Figure \ref{CFL(trefoil) Figure}. It so happens that $\widetilde{\mathcal D}$ is a nice diagram in the sense of Sarkar and Wang \cite{MR2630063}, although the equivariant diagrams for periodic knots introduced in Section \ref{Periodic Knots Section} are not in general, so we may compute $\widetilde{\mathit{HFL}}(\widetilde{\mathcal D})$ with relative ease. The chain complexes in each Alexander $A_1$ grading are shown in Figures \ref{-7/2 Figure}, \ref{-5/2 Figure}, \ref{-3/2 Figure}, \ref{-1/2 Figure}, \ref{1/2 Figure}, \ref{3/2 Figure} and \ref{5/2 Figure} at the close of this section. The convention for these figures is as follows: the generators shown are those in the $A_1$ grading of $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$, which are also the generators of the $A_1 - \frac{3}{2}$ grading of $\widehat{\mathit{CFK}}(\widetilde{\mathcal D})$. Solid arrows denote differentials that count for the differential $\partial$ and thus exist in both complexes, whereas dashed arrows denote differentials corresponding to disks with nontrivial intersection with the divisor $V_{z_0} = \{z_0\}\times \text{Sym}^{2n_1}(S^2)$. Therefore dashed differentials only count for the knot Floer complex $(\widehat{\mathit{CFK}}(\widetilde{\mathcal D}), \partial_U)$.
\begin{figure}
\caption{Alexander $A_1$ and $A_2$ gradings of the generators of $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$. These generate the $E^0$ page of the link Floer homology spectral sequence.}
\label{CFL(trefoil) Figure}
\centering
\begin{tikzpicture}[scale=.9]
\draw(-8,20)edge(7,20);
\draw(-8,20)edge(-8,9);
\draw(7,20)edge(7,9);
\draw(-8,9)edge(7,9);
\draw(-8,19)edge(7,19);
\draw(-8,20)edge(-7,19);
\node at (-7.7,19.3){$A_2$};
\node at (-7.3,19.7){$A_1$};
\draw(-7,20)edge(-7,9);
\node at (-6,19.5){$-\frac{7}{2}$};
\node at (-5.5,18){$ $};
\node at (-6,18.25)[font = \small]{$h^1h^2$};
\node at (-6,15.75)[font = \small]{$g^1g^2$};
\draw(-5,20)edge(-5,9);
\node at (-4,19.5){$-\frac{5}{2}$};
\node at (-4.5,18.25)[font = \small]{$h^1i^2$};
\node at (-3.5,18.25)[font=\small]{$i^1h^2$};
\draw(-8,17.5)edge(7,17.5);
\node at (-4.5, 16.25)[font = \small]{$g^1j^2$};
\node at (-3.5, 16.25)[font=\small]{$j^1g^2$};
\node at (-4.5, 15.75)[font=\small] {$g^1f^2$};
\node at (-3.5, 15.75)[font=\small]{$f^1g^2$};
\node at (-4.5, 15.25)[font=\small]{$h^1e^2$};
\node at (-3.5, 15.25)[font=\small]{$e^1h^2$};
\draw (-3,20)edge(-3,9);
\node at (-7.5,15.75){$\frac{1}{2}$};
\node at (-7.5,18.25){$\frac{3}{2}$};
\node at (-2.5,17)[font=\small]{$f^1f^2$};
\node at (-1.5,17)[font = \small]{$j^1j^2$};
\node at (-2.5,16.5)[font = \small]{$e^1i^2$};
\node at (-1.5,16.5)[font = \small]{$i^1e^2$};
\node at (-2.5,16)[font = \small]{$f^1j^2$};
\node at (-1.5,16)[font = \small]{$j^1f^2$};
\node at (-2.5,15.5)[font = \small]{$g^1k^2$};
\node at (-1.5,15.5)[font = \small]{$k^1g^2$};
\node at (-2.5,15)[font = \small]{$h^1c^2$};
\node at (-1.5,15)[font=\small]{$c^1h^2$};
\node at (-2.5,14.5)[font = \small]{$h^1 \ell^2$};
\node at (-1.5,14.5)[font = \small]{$\ell^1 h^2$};
\node at (-2,19.5)[font = \small]{$-\frac{3}{2}$};
\node at (-2,18.25)[font=\small]{$i^1i^2$};
\draw(-1,20)edge(-1,9);
\draw(-8,14)edge(7,14);
\node at (-.5 ,13.25)[font = \small]{$e^1c^2$};
\node at (.5,13.25)[font=\small]{$c^1e^2$};
\node at (-.5,12.75)[font = \small]{$e^1\ell^2$};
\node at (.5,12.75)[font = \small]{$\ell^1e^2$};
\node at (-.5,12.25)[font = \small]{$b^1f^2$};
\node at (.5, 12.25)[font=\small]{$f^1b^2$};
\node at (-.5,11.75)[font = \small]{$b^1j^2$};
\node at (.5,11.75)[font = \small]{$j^1b^2$};
\node at (-.5,11.25)[font = \small]{$a^1g^2$};
\node at (.5,11.25)[font = \small]{$g^1a^2$};
\draw(1,20)edge(1,9);
\node at (0,19.5){$-\frac{1}{2}$};
\draw(-8,10.5)edge(7,10.5);
\node at (-7.5,12.25){$-\frac{1}{2}$};
\node at (-2,12.5)[font = \small]{$e^1e^2$};
\node at (-2.5,12)[font=\small]{$b^1g^2$};
\node at (-1.5,12)[font=\small]{$g^1b^2$};
\node at (-.5,16.75)[font=\small]{$f^1k^2$};
\node at (.5,16.75)[font=\small]{$k^1f^2$};
\node at (-.5,16.25)[font=\small]{$j^1k^2$};
\node at (.5,16.25)[font=\small]{$k^1j^2$};
\node at (-.5,15.75)[font = \small]{$i^1\ell^2$};
\node at (.5,15.75)[font=\small]{$\ell^1i^2$};
\node at (-.5,15.25)[font=\small]{$c^1i^2$};
\node at (.5,15.25)[font=\small]{$i^1c^2$};
\node at (-.5,14.75)[font = \small]{$h^1m^2$};
\node at (.5,14.75)[font = \small]{$m^1h^2$};
\draw(3,20)edge(3,9);
\node at (2,19.5) {$\frac{1}{2}$};
\draw(5,20)edge(5,9);
\node at (4,19.5) {$\frac{3}{2}$};
\node at (6,19.5){$\frac{5}{2}$};
\node at (6,12.25)[font = \small]{$m^1m^2$};
\node at (2,16)[font=\small]{$k^1k^2$};
\node at (1.5,15.5)[font = \small]{$i^1m^2$};
\node at (2.5,15.5)[font = \small]{$m^1i^2$};
\node at (3.5,12.75)[font = \small]{$c^1m^2$};
\node at (4.5,12.75)[font = \small]{$m^1c^2$};
\node at (3.5,12.25)[font = \small]{$\ell^1 m^2$};
\node at (4.5,12.25)[font = \small]{$m^1 \ell^2$};
\node at (3.5,11.75)[font=\small]{$a^1k^2$};
\node at (4.5,11.75)[font=\small]{$k^1a^2$};
\node at (-7.5,9.75) {$-\frac{3}{2}$};
\node at (6,9.75)[font = \small]{$a^1a^2$};
\node at (3.5,9.75)[font = \small]{$a^1b^2$};
\node at (4.5,9.75)[font = \small]{$b^1a^2$};
\node at (2,9.75)[font=\small]{$b^1b^2$};
\node at (1.5, 12.5)[font = \small]{$b^1k^2$};
\node at (2.5, 12.5)[font = \small]{$k^1b^2$};
\node at (1.5,12)[font = \small]{$a^1f^2$};
\node at (2.5,12)[font = \small]{$f^1a^2$};
\node at (1.5,13.5)[font = \small]{$c^1c^2$};
\node at (2.5,13.5)[font = \small]{$\ell^1\ell^2$};
\node at (1.5,13)[font = \small]{$c^1 \ell^2$};
\node at (2.5,13)[font = \small]{$\ell^1 c^2$};
\node at (1.5,11.5)[font = \small]{$e^1 m^2$};
\node at (2.5,11.5)[font = \small]{$m^1e^2$};
\node at (1.5,11)[font = \small]{$a^1j^2$};
\node at (2.5,11)[font = \small]{$j^1a^2$};
\end{tikzpicture}
\end{figure}
\medskip
The link Floer homology spectral sequence associated to $\widetilde{\mathcal D}$ arises from the double complex $(\widehat{\mathit{CFL}}(\widetilde{\mathcal D}), \partial + (1 + \tau^{\#})\theta)$.
Computing homology of $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ with respect to the differential $\partial$, we obtain a set of generators for the $E^1$ page of the spectral sequence, which is $\widetilde{\mathit{HFL}}(\widetilde{\mathcal D}) \otimes \mathbb Z_2((\theta)) = (\widehat{\mathit{HFL}}(S^3, 3_1 \cup U) \otimes V_1) \otimes \mathbb Z_2((\theta))$. These generators and their gradings may be found in Figure \ref{HFL(trefoil) figure}. Whenever an element of $\widetilde{\mathit{HFL}}(\widetilde{\mathcal D})$ is invariant under the involution $\tau^*$ but has no representative which is invariant under the chain map $\tau^{\#}$, we have included two representatives of that element to make the $\tau^*$ invariance clear. (For example, observe that $[h^1c^2] = [c^1h^2]$ is invariant under $\tau^*$.)
\begin{figure}
\caption{The homology $\widetilde{\mathit{HFL}}(\widetilde{\mathcal D})$. These elements generate the $E^1$ page of the link Floer spectral sequence as a $\mathbb Z_2((\theta))$ module.}
\label{HFL(trefoil) figure}
\centering
\begin{tikzpicture}[scale=.9]
\draw(-7.75,20)edge(8.25,20);
\draw(-7.75,20)edge(-7.75,9);
\draw(8.25,20)edge(8.25,9);
\draw(-7.75,9)edge(8.25,9);
\draw(-7.75,19)edge(8.25,19);
\draw(-7.75,20)edge(-6.75,19);
\draw(-6.75,20)edge(-6.75,9);
\draw(-5.25,20)edge(-5.25,9);
\draw(-7.75,17.5)edge(8.25,17.5);
\draw (-3,20)edge(-3,9);
\draw(-.5,20)edge(-.5,9);
\draw(-7.75,14)edge(8.25,14);
\draw(1.75,20)edge(1.75,9);
\draw(-7.75,10.5)edge(8.25,10.5);
\draw(4.25,20)edge(4.25,9);
\draw(6.75,20)edge(6.75,9);
\node at (-7.5,19.3){$A_2$};
\node at (-7.1,19.7){$A_1$};
\node at (-6,19.5){$-\frac{7}{2}$};
\node at (-6,18.25)[font = \small]{$[h^1h^2]$};
\node at (-6,15.75)[font = \small]{$[g^1g^2]$};
\node at (-4.2,19.5){$-\frac{5}{2}$};
\node at (-4.65,18.25)[font = \small]{$[h^1i^2]$};
\node at (-3.65,18.25)[font=\small]{$[i^1h^2]$};
\node at (-4.15, 16.25)[font = \small]{$[g^1j^2+f^1g^2]$};
\node at (-4.15, 15.75)[font=\small]{$[j^1g^2+g^1f^2]$};
\node at (-7.25,15.75){$\frac{1}{2}$};
\node at (-7.25,18.25){$\frac{3}{2}$};
\node at (-1.75,16.5)[font=\small]{$[f^1f^2 + k^1g^2 +$};
\node at (-1.75,16)[font = \small]{$ g^1k^2 + j^1j^2]$};
\node at (-1.75,15.25)[font = \small]{$[h^1c^2] = [c^1h^2]$};
\node at (-1.75,19.5)[font = \small]{$-\frac{3}{2}$};
\node at (-1.75,18.25)[font=\small]{$[i^1i^2]$};
\node at (.1,12.25)[font = \small]{$[a^1g^2]$};
\node at (1.2,12.25)[font = \small]{$[g^1a^2]$};
\node at (.6,19.5){$-\frac{1}{2}$};
\node at (-7.25,12.25){$-\frac{1}{2}$};
\node at (-1.75,12.25)[font=\small]{$[b^1g^2+g^1b^2]$};
\node at (1.2,15.75)[font=\small]{$[c^1i^2]$};
\node at (.1,15.75)[font=\small]{$[i^1c^2]$};
\node at (3,19.5) {$\frac{1}{2}$};
\node at (5.4,19.5) {$\frac{3}{2}$};
\node at (7.5,19.5){$\frac{5}{2}$};
\node at (7.5,12.25)[font = \small]{$[m^1m^2]$};
\node at (3,15.75)[font = \small]{$[i^1m^2] = [m^1i^2]$};
\node at (5.5,12.5)[font = \small]{$[c^1m^2+m^1\ell^2]$};
\node at (5.5,12)[font = \small]{$[m^1c^2+\ell^1 m^2]$};
\node at (-7.25,9.75) {$-\frac{3}{2}$};
\node at (7.5,9.75)[font = \small]{$[a^1a^2]$};
\node at (4.9,9.75)[font = \small]{$[a^1b^2]$};
\node at (6,9.75)[font = \small]{$[b^1a^2]$};
\node at (3,9.75)[font=\small]{$[b^1b^2]$};
\node at (3,11.5)[font = \small]{$[a^1f^2]=[f^1a^2]$};
\node at (3,13)[font = \small]{$[c^1c^2+e^1 m^2 +$};
\node at (3,12.5)[font = \small]{$m^1e^2 + \ell^1 \ell^2]$};
\end{tikzpicture}
\end{figure}
\medskip
The differential $d_1$ on the $E^1$ page of the link Floer spectral sequence for $\widetilde{\mathcal D}$ is $(1 + \tau^*)\theta$; in particular, computing the homology of $d_1$ has the effect of killing all elements of $\widetilde{\mathit{HFL}}(\widetilde{\mathcal D})$ not invariant under $\tau^*$. Ergo we see that the $E^2$ page of this spectral sequence is generated as a $\mathbb Z_2((\theta))$ module by the elements of Figure \ref{HFL(E2) Figure}. Notice that the ranks of the $E^2$ page in each Alexander grading $A_1 = 2k + \frac{1}{2}$ correspond precisely to the ranks of each Alexander grading $A_1 = k + \frac{1}{2}$ of Figure \ref{Trefoil Quotient Complexes}, which is the link Floer homology $\widetilde{\mathit{HFL}}(\mathcal D)$. Therefore the link Floer homology spectral sequence converges on the $E^2$ page.
\begin{figure}
\caption{Generators for the $E^2 = E^{\infty}$ page of the link Floer spectral sequence for $\widetilde{\mathcal D}$ as a $\mathbb Z_2((\theta))$-module.}
\label{HFL(E2) Figure}
\centering
\begin{tikzpicture}[scale=.9]
\draw(-7.75,20)edge(8.25,20);
\draw(-7.75,20)edge(-7.75,9);
\draw(8.25,20)edge(8.25,9);
\draw(-7.75,9)edge(8.25,9);
\draw(-7.75,19)edge(8.25,19);
\draw(-7.75,20)edge(-6.75,19);
\draw(-6.75,20)edge(-6.75,9);
\draw(-5.25,20)edge(-5.25,9);
\draw(-7.75,17.5)edge(8.25,17.5);
\draw (-3,20)edge(-3,9);
\draw(-.5,20)edge(-.5,9);
\draw(-7.75,14)edge(8.25,14);
\draw(1.75,20)edge(1.75,9);
\draw(-7.75,10.5)edge(8.25,10.5);
\draw(4.25,20)edge(4.25,9);
\draw(6.75,20)edge(6.75,9);
\node at (-7.5,19.3){$A_2$};
\node at (-7.1,19.7){$A_1$};
\node at (-6,19.5){$-\frac{7}{2}$};
\node at (-6,18.25)[font = \small]{$[h^1h^2]$};
\node at (-6,15.75)[font = \small]{$[g^1g^2]$};
\node at (-4.2,19.5){$-\frac{5}{2}$};
\node at (-7.25,15.75){$\frac{1}{2}$};
\node at (-7.25,18.25){$\frac{3}{2}$};
\node at (-1.75,16.5)[font=\small]{$[f^1f^2 + k^1g^2 +$};
\node at (-1.75,16)[font = \small]{$ g^1k^2 + j^1j^2]$};
\node at (-1.75,15.25)[font = \small]{$[h^1c^2] = [c^1h^2]$};
\node at (-1.75,19.5)[font = \small]{$-\frac{3}{2}$};
\node at (-1.75,18.25)[font=\small]{$[i^1i^2]$};
\node at (.6,19.5){$-\frac{1}{2}$};
\node at (-7.25,12.25){$-\frac{1}{2}$};
\node at (-1.75,12.25)[font=\small]{$[b^1g^2+g^1b^2]$};
\node at (3,19.5) {$\frac{1}{2}$};
\node at (5.4,19.5) {$\frac{3}{2}$};
\node at (7.5,19.5){$\frac{5}{2}$};
\node at (7.5,12.25)[font = \small]{$[m^1m^2]$};
\node at (3,15.75)[font = \small]{$[i^1m^2] = [m^1i^2]$};
\node at (-7.25,9.75) {$-\frac{3}{2}$};
\node at (3,9.75)[font=\small]{$[b^1b^2]$};
\node at (3,11.5)[font = \small]{$[a^1f^2]=[f^1a^2]$};
\node at (3,13)[font = \small]{$[c^1c^2+e^1 m^2 +$};
\node at (3,12.5)[font = \small]{$m^1e^2 + \ell^1 \ell^2]$};
\node at (7.5,9.75)[font = \small]{$[a^1a^2]$};
\end{tikzpicture}
\end{figure}
\medskip
We now turn our attention to the knot Floer homology spectral sequence for $\widetilde{\mathcal D}$, which arises from the double complex $(\widehat{\mathit{CFK}}(\widetilde{\mathcal D}) \otimes \mathbb Z_2((\theta)), \partial_U + (1 + \tau^{\#})\theta)$. The Alexander $A_1$ gradings of the seventy-two generators in $\widehat{\mathit{CFK}}(\widetilde{\mathcal D})$ are laid out in Figure \ref{CFK(trefoil) Figure}. Notice that these gradings are exactly the $A_1$ gradings of $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ shifted downward by $\frac{\ell k(3_1, U)}{2} = \frac{3}{2}$. These elements generate the $E^0$ page of the knot Floer spectral sequence as a $\mathbb Z_2((\theta))$ module.
\begin{figure}
\caption{Alexander $A_1$ gradings of elements of $\widehat{\mathit{CFK}}(\widetilde{\mathcal D})$. These elements generate the $E^0$ page of the knot Floer spectral sequence as a $\mathbb Z_2((\theta))$ module.}
\centering
\begin{tikzpicture}[scale=.9]
\draw(-8,20)edge(7,20);
\draw(-8,20)edge(-8,9);
\draw(7,20)edge(7,9);
\draw(-8,9)edge(7,9);
\draw(-8,19)edge(7,19);
\draw(-7,20)edge(-7,9);
\draw(-5,20)edge(-5,9);
\draw(-7,17.5)edge(7,17.5);
\draw (-3,20)edge(-3,9);
\draw(-1,20)edge(-1,9);
\draw(-7,14)edge(7,14);
\draw(1,20)edge(1,9);
\draw(-7,10.5)edge(7,10.5);
\draw(3,20)edge(3,9);
\draw(5,20)edge(5,9);
\node at (-7.5,19.5){$A_1$};
\node at (-6,19.5){$-5$};
\node at (-6,18.25)[font = \small]{$h^1h^2$};
\node at (-6,15.75)[font = \small]{$g^1g^2$};
\node at (-4,19.5){$-4$};
\node at (-4.5,18.25)[font = \small]{$h^1i^2$};
\node at (-3.5,18.25)[font=\small]{$i^1h^2$};
\node at (-4.5, 16.25)[font = \small]{$g^1j^2$};
\node at (-3.5, 16.25)[font=\small]{$j^1g^2$};
\node at (-4.5, 15.75)[font=\small] {$g^1f^2$};
\node at (-3.5, 15.75)[font=\small]{$f^1g^2$};
\node at (-4.5, 15.25)[font=\small]{$h^1e^2$};
\node at (-3.5, 15.25)[font=\small]{$e^1h^2$};
\node at (-2.5,17)[font=\small]{$f^1f^2$};
\node at (-1.5,17)[font = \small]{$j^1j^2$};
\node at (-2.5,16.5)[font = \small]{$e^1i^2$};
\node at (-1.5,16.5)[font = \small]{$i^1e^2$};
\node at (-2.5,16)[font = \small]{$f^1j^2$};
\node at (-1.5,16)[font = \small]{$j^1f^2$};
\node at (-2.5,15.5)[font = \small]{$g^1k^2$};
\node at (-1.5,15.5)[font = \small]{$k^1g^2$};
\node at (-2.5,15)[font = \small]{$h^1c^2$};
\node at (-1.5,15)[font=\small]{$c^1h^2$};
\node at (-2.5,14.5)[font = \small]{$h^1 \ell^2$};
\node at (-1.5,14.5)[font = \small]{$\ell^1 h^2$};
\node at (-2,19.5)[font = \small]{$-3$};
\node at (-2,18.25)[font=\small]{$i^1i^2$};
\node at (-.5 ,13.25)[font = \small]{$e^1c^2$};
\node at (.5,13.25)[font=\small]{$c^1e^2$};
\node at (-.5,12.75)[font = \small]{$e^1\ell^2$};
\node at (.5,12.75)[font = \small]{$\ell^1e^2$};
\node at (-.5,12.25)[font = \small]{$b^1f^2$};
\node at (.5, 12.25)[font=\small]{$f^1b^2$};
\node at (-.5,11.75)[font = \small]{$b^1j^2$};
\node at (.5,11.75)[font = \small]{$j^1b^2$};
\node at (-.5,11.25)[font = \small]{$a^1g^2$};
\node at (.5,11.25)[font = \small]{$g^1a^2$};
\node at (0,19.5){$-2$};
\node at (-2,12.5)[font = \small]{$e^1e^2$};
\node at (-2.5,12)[font=\small]{$b^1g^2$};
\node at (-1.5,12)[font=\small]{$g^1b^2$};
\node at (-.5,16.75)[font=\small]{$f^1k^2$};
\node at (.5,16.75)[font=\small]{$k^1f^2$};
\node at (-.5,16.25)[font=\small]{$j^1k^2$};
\node at (.5,16.25)[font=\small]{$k^1j^2$};
\node at (-.5,15.75)[font = \small]{$i^1\ell^2$};
\node at (.5,15.75)[font=\small]{$\ell^1i^2$};
\node at (-.5,15.25)[font=\small]{$c^1i^2$};
\node at (.5,15.25)[font=\small]{$i^1c^2$};
\node at (-.5,14.75)[font = \small]{$h^1m^2$};
\node at (.5,14.75)[font = \small]{$m^1h^2$};
\node at (2,19.5) {$-1$};
\node at (4,19.5) {$0$};
\node at (6,19.5){$1$};
\node at (6,12.25)[font = \small]{$m^1m^2$};
\node at (2,16)[font=\small]{$k^1k^2$};
\node at (1.5,15.5)[font = \small]{$i^1m^2$};
\node at (2.5,15.5)[font = \small]{$m^1i^2$};
\node at (3.5,12.75)[font = \small]{$c^1m^2$};
\node at (4.5,12.75)[font = \small]{$m^1c^2$};
\node at (3.5,12.25)[font = \small]{$\ell^1 m^2$};
\node at (4.5,12.25)[font = \small]{$m^1 \ell^2$};
\node at (3.5,11.75)[font=\small]{$a^1k^2$};
\node at (4.5,11.75)[font=\small]{$k^1a^2$};
\node at (6,9.75)[font = \small]{$a^1a^2$};
\node at (3.5,9.75)[font = \small]{$a^1b^2$};
\node at (4.5,9.75)[font = \small]{$b^1a^2$};
\node at (2,9.75)[font=\small]{$b^1b^2$};
\node at (1.5, 12.5)[font = \small]{$b^1k^2$};
\node at (2.5, 12.5)[font = \small]{$k^1b^2$};
\node at (1.5,12)[font = \small]{$a^1f^2$};
\node at (2.5,12)[font = \small]{$f^1a^2$};
\node at (1.5,13.5)[font = \small]{$c^1c^2$};
\node at (2.5,13.5)[font = \small]{$\ell^1\ell^2$};
\node at (1.5,13)[font = \small]{$c^1 \ell^2$};
\node at (2.5,13)[font = \small]{$\ell^1 c^2$};
\node at (1.5,11.5)[font = \small]{$e^1 m^2$};
\node at (2.5,11.5)[font = \small]{$m^1e^2$};
\node at (1.5,11)[font = \small]{$a^1j^2$};
\node at (2.5,11)[font = \small]{$j^1a^2$};
\end{tikzpicture}
\label{CFK(trefoil) Figure}
\end{figure}
\medskip
As before, the chain complexes in each Alexander grading may be found in Figures \ref{-7/2 Figure}, \ref{-5/2 Figure}, \ref{-3/2 Figure}, \ref{-1/2 Figure}, \ref{1/2 Figure}, \ref{3/2 Figure} and \ref{5/2 Figure}. Computing the homology of these complexes, we obtain $\widetilde{\mathit{HFK}}(\widetilde{\mathcal D}) = \widehat{\mathit{HFK}}(3_1)\otimes V_1 \otimes W$, whose generators are described in Figure \ref{HFK(trefoil) Figure}. These elements generate the $E^1$ page of the knot Floer homology spectral sequence of $\widetilde{\mathcal D}$ as a $\mathbb Z_2((\theta))$-module. Once again, homology classes which are equivalent under the induced involution $\tau^*$ but have no representative which is invariant under the chain map $\tau^{\#}$ have been included with two equivalent descriptions to emphasize their invariance.
\begin{figure}
\caption{The homology $\widetilde{\mathit{HFK}}(\widetilde{\mathcal D})$, which is $\widehat{\mathit{HFK}}(3_1)\otimes V_1 \otimes W$. These elements generate the $E^1$ page of the knot Floer homology spectral sequence of $\widetilde{\mathcal D}$ as a $\mathbb Z_2((\theta))$-module.}
\label{HFK(trefoil) Figure}
\centering
\begin{tikzpicture}[scale=.9]
\draw(-8,18)edge(6,18);
\draw(-8,18)edge(-8,9);
\draw(6,18)edge(6,9);
\draw(-8,9)edge(6,9);
\draw(-8,18)edge(6,18);
\draw(-7,18)edge(-7,9);
\draw(-6,18)edge(-6,9);
\draw(-8,17)edge(6,17);
\draw (-5,18)edge(-5,9);
\draw(-4,18)edge(-4,9);
\draw(-7,14)edge(6,14);
\draw(-1.5,18)edge(-1.5,9);
\draw(-7,10.5)edge(6,10.5);
\draw(1,18)edge(1,9);
\draw(3.5,18)edge(3.5,9);
\node at (-7.5,17.5){$A_1$};
\node at (-6.5,17.5){$-5$};
\node at (-5.5,17.5){$-4$};
\node at (-4.5,17.5)[font = \small]{$-3$};
\node at (-2.75, 12.25)[font=\small]{$[a^1g^2]=[g^1a^2]$};
\node at (-2.75,17.5){$-2$};
\node at (-2.75,15.5)[font = \small]{$[h^1m^2+m^1h^2]$};
\node at (-.25,17.5) {$-1$};
\node at (2.25,17.5) {$0$};
\node at (4.75,17.5){$1$};
\node at (4.75,12.25)[font = \small]{$[m^1m^2]$};
\node at (-.25,15.75)[font = \small]{$[i^1m^2+m^1e^2]$};
\node at (-.25,15.25)[font = \small]{$[m^1i^2+e^1m^2]$};
\node at (2.25,12.5)[font = \small]{$[\ell^1 m^2+m^1c^2]$};
\node at (2.25,12)[font = \small]{$[m^1 \ell^2 + c^1m^2]$};
\node at (4.75,9.75)[font = \small]{$[a^1a^2]$};
\node at (1.75,9.75)[font = \small]{$[a^1b^2]$};
\node at (2.75,9.75)[font = \small]{$[b^1a^2]$};
\node at (-.8,12.25)[font = \small]{$[a^1f^2]$};
\node at (.3,12.25)[font = \small]{$[f^1a^2]$};
\end{tikzpicture}
\end{figure}
\medskip
The differential $d_1 = (1 + \tau^*)\theta$ on the $E^1$ page of the spectral sequence has the effect of eliminating all elements of $\widetilde{\mathit{HFK}}(\widetilde{\mathcal D})$ which are not invariant under the action of $\tau^*$. Computing homology with respect to $d_1$ yields the set of generators of Figure \ref{E2 trefoil knot figure}.
\begin{figure}
\caption{Generators for the $E^2$ page of the knot Floer spectral sequence associated to $\widetilde{\mathcal D}$ as a $\mathbb Z_2((\theta))$ module.}
\label{E2 trefoil knot figure}
\begin{tikzpicture}[scale=.9]
\draw(-8,18)edge(3,18);
\draw(-8,18)edge(-8,9);
\draw(3,18)edge(3,9);
\draw(-8,9)edge(3,9);
\draw(-7,18)edge(-7,9);
\draw(-6,18)edge(-6,9);
\draw(-8,17)edge(3,17);
\draw (-5,18)edge(-5,9);
\draw(-4,18)edge(-4,9);
\draw(-7,14)edge(3,14);
\draw(-1.5,18)edge(-1.5,9);
\draw(-7,10.5)edge(3,10.5);
\draw(-.5,18)edge(-.5,9);
\draw(.5,18)edge(.5,9);
\node at (-7.5,17.5){$A_1$};
\node at (-6.5,17.5){$-5$};
\node at (-5.5,17.5){$-4$};
\node at (-4.5,17.5)[font = \small]{$-3$};
\node at (-2.75, 12.25)[font=\small]{$[a^1g^2]=[g^1a^2]$};
\node at (-2.75,17.5){$-2$};
\node at (-2.75,15.5)[font = \small]{$[h^1m^2+m^1h^2]$};
\node at (-1,17.5) {$-1$};
\node at (0,17.5) {$0$};
\node at (1.75,17.5){$1$};
\node at (1.75,12.25)[font = \small]{$[m^1m^2]$};
\node at (1.75,9.75)[font = \small]{$[a^1a^2]$};
\end{tikzpicture}
\end{figure}
\medskip
The knot Floer spectral sequence stabilizes on the $E^2$ page in every Alexander grading except $A_1 = -2$. There is a single nontrivial $d_2$ differential which behaves similarly to the nontrivial $d_2$ differential we earlier saw in the case of the unknot as a doubly-periodic knot. In particular: consider $d_2([a^1g^2])$. We compute this differential as follows: first, apply $1+\tau^{\#}$ to $a^1g^2$, obtaining $(a^1g^2 + g^1a^2)\theta$. Next, we choose an element whose boundary under $\partial_U$ is $(a^1g^2 + g^1a^2)\theta$; one such is $(h^1m^2)\theta$. Then $d_2([a^1g^2]) = [(1+\tau^{\#})(h^1m^2)]\theta^2 = [h^1m^2 + m^1h^2]\theta^2$. Moreover, we then have $d_2([a^1g^2]\theta^n)=[h^1m^2 + m^1h^2]\theta^{n+2}$ in general. Therefore the Alexander grading $-2$ vanishes on the $E^3$ page of the knot Floer spectral sequence, and the $E^3 = E^{\infty}$ page is isomorphic to $\widetilde{\mathit{HFK}}(\mathcal D)$ after an appropriate shift and rescaling in Alexander $A_1$ gradings. Generators for this page of the knot Floer spectral sequence appear in Figure \ref{E3(trefoil) Figure}.
\begin{figure}
\caption{Generators for the $E^3 = E^{\infty}$ page of the knot Floer spectral sequence associated to $\mathcal{\widetilde{D}}$.}
\label{E3(trefoil) Figure}
\centering
\begin{tikzpicture}[scale=.9]
\draw(-8,15)edge(1.5,15);
\draw(-8,15)edge(-8,9);
\draw(1.5,15)edge(1.5,9);
\draw(-8,9)edge(1.5,9);
\draw(-7,15)edge(-7,9);
\draw(-6,15)edge(-6,9);
\draw (-5,15)edge(-5,9);
\draw(-4,15)edge(-4,9);
\draw(-7,14)edge(1.5,14);
\draw(-3,15)edge(-3,9);
\draw(-7,10.5)edge(1.5,10.5);
\draw(-2,15)edge(-2,9);
\draw(-1,15)edge(-1,9);
\node at (-7.5,14.5){$A_1$};
\node at (-6.5,14.5){$-5$};
\node at (-5.5,14.5){$-4$};
\node at (-4.5,14.5)[font = \small]{$-3$};
\node at (-3.5,14.5){$-2$};
\node at (-2.5,14.5) {$-1$};
\node at (-1.5,14.5) {$0$};
\node at (.25,14.5){$1$};
\node at (.25,12.25)[font = \small]{$[m^1m^2]$};
\node at (.25,9.75)[font = \small]{$[a^1a^2]$};
\end{tikzpicture}
\end{figure}
\medskip
Notice that for the left-handed trefoil considered as a doubly-periodic knot, Edmonds' condition is sharp: $g(3_1) = 1 = 2(0) + \frac{3 - 1}{2} = 2g(U) + \frac{\lambda -1}{2}$. We also see here the realization of Corollary \ref{Fiberedness Corollary}: the left-handed trefoil is fibred, and the highest nontrivial Alexander grading in $\widetilde{\mathit{HFK}}(\widetilde{\mathcal D})$, namely $A_1 = 1$, has rank two. The two generators in this Alexander grading, $[a^1a^2]$ and $[m^1m^2]$, are preserved over the course of the spectral sequence and become the two generators of $\widetilde{\mathit{HFK}}(\mathcal D)$ in grading $A_1 = 0$ under the isomorphism between the $E^{\infty}$ page of the knot Floer spectral sequence and $\widetilde{\mathit{HFK}}(\mathcal D)$. Then the highest nontrivial Alexander grading of $\widetilde{\mathit{HFK}}(\mathcal D)$, $A_1 = 0$, also has rank two, corresponding to the unknot being a fibred knot.
\begin{figure}
\caption{The chain complex $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = -\frac{7}{2}$, and the chain complex $\widehat{\mathit{CFK}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = -5$. Dashed arrows denote differentials appearing only in the latter.}
\label{-7/2 Figure}
\centering
\begin{tikzpicture}[node distance = 2cm, auto]
\node(1){$h^1h^2$};
\node(2)[below of = 1]{$g^1g^2$};
\path[->,dashed](1)edge(2);
\end{tikzpicture}
\end{figure}
\begin{figure}
\caption{The chain complex $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = -\frac{5}{2}$, and the chain complex $\widehat{\mathit{CFK}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = -4$. Dashed arrows denote differentials appearing only in the latter.}
\label{-5/2 Figure}
\centering
\begin{tikzpicture}[node distance = 2cm, auto]
\node(1){$h^1i^2$};
\node(2)[below left = 1.5 cm and .3 cm of 1]{$g^1j^2$};
\node(3)[below right = 1.5 cm and .3 cm of 1]{$f^1g^2$};
\node(4)[below = 3.4 cm of 1]{$e^1h^2$};
\path[->,dashed](1)edge(2);
\path[->,dashed](1)edge(3);
\path[->](2)edge(4);
\path[->](3)edge(4);
\node(5)[right of = 1, node distance = 5 cm]{$i^1h^2$};
\node(6)[below left = 1.5 cm and .3 cm of 5]{$g^1f^2$};
\node(7)[below right = 1.5 cm and .3 cm of 5]{$j^1g^2$};
\node(8)[below = 3.4 cm of 5]{$h^1e^2$};
\path[->,dashed](5)edge(6);
\path[->,dashed](5)edge(7);
\path[->](6)edge(8);
\path[->](7)edge(8);
\end{tikzpicture}
\end{figure}
\begin{figure}
\caption{The chain complex $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = -\frac{3}{2}$, and the chain complex $\widehat{\mathit{CFK}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = -3$. Dashed arrows denote differentials appearing only in the latter.}
\label{-3/2 Figure}
\centering
\begin{tikzpicture}[node distance = 2 cm, auto]
\node(1){$i^1i^2$};
\node(2)[below left = 1.5 cm and .5 cm of 1]{$k^1g^2$};
\node(3)[below right = 1.5 cm and .5 cm of 1]{$g^1k^2$};
\node(4)[left of = 2]{$f^1f^2$};
\node(5)[right of = 3]{$j^1j^2$};
\path[->, dashed](1)edge(2);
\path[->, dashed](1)edge(3);
\path[->, dashed](1)edge(4);
\path[->, dashed](1)edge(5);
\node(6)[below = 3.5 cm of 1]{$e^1e^2$};
\node(7)[left of = 6]{$c^1h^2$};
\node(9)[left of = 7]{$h^1c^2$};
\node(8)[right of = 6]{$\ell^1h^2$};
\node(10)[right of = 8]{$h^1\ell^2$};
\path[->, dashed](4)edge(6);
\path[->](4)edge(9);
\path[->](4)edge(7);
\path[->, dashed](5)edge(6);
\path[->](5)edge(8);
\path[->](5)edge(10);
\path[->](2)edge(9);
\path[->](2)edge(8);
\path[->](3)edge(7);
\path[->](3)edge(10);
\node(11)[below = 3.5 cm of 2]{$g^1b^2$};
\node(12)[below = 3.5 cm of 3]{$b^1g^2$};
\path[->](6)edge(11);
\path[->](6)edge(12);
\path[->, dashed](9)edge(11);
\path[->, dashed](7)edge(12);
\path[->, dashed](8)edge(11);
\path[->, dashed](10)edge(12);
\node(13)[below of = 11]{$e^1i^2$};
\node(14)[below of = 12]{$i^1e^2$};
\node(15)[below of = 13]{$f^1j^2$};
\node(16)[below of = 14]{$j^1f^2$};
\path[->](13)edge(15);
\path[->](14)edge(16);
\end{tikzpicture}
\end{figure}
\begin{figure}
\caption{The chain complex $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = -\frac{1}{2}$, and the chain complex $\widehat{\mathit{CFK}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = -2$. Dashed arrows denote differentials appearing only in the latter.}
\label{-1/2 Figure}
\centering
\rotatebox{90}{
\begin{tikzpicture}[node distance = 2 cm, auto]
\node(1){$k^1j^2$};
\node(2)[right of = 1, node distance = 4 cm]{$f^1k^2$};
\node(3)[right of = 2, node distance = 4 cm]{$k^1f^2$};
\node(4)[right of = 3, node distance = 4 cm]{$j^1k^2$};
\node(9)[below left = 3 cm and .02 cm of 2]{$h^1m^2$};
\node(12)[below right = 3 cm and .02 cm of 3]{$m^1h^2$};
\node(7)[right of = 9]{$\ell^1e^2$};
\node(8)[left of = 12]{$e^1\ell^2$};
\node(11)[left of = 9]{$\ell^1i^2$};
\node(5)[left of = 11]{$c^1i^2$};
\node(10)[left of = 5]{$e^1c^2$};
\node(14)[right of = 12]{$i^1\ell^2$};
\node(6)[right of = 14]{$i^1c^2$};
\node(13)[right of = 6]{$c^1e^2$};
\path[->](1)edge(11);
\path[->, dashed](1)edge(10);
\path[->](1)edge(9);
\path[->](2)edge(9);
\path[->](2)edge(5);
\path[->, dashed](2)edge(8);
\path[->, dashed](3)edge(7);
\path[->](3)edge(6);
\path[->](3)edge(12);
\path[->](4)edge(12);
\path[->,dashed](4)edge(13);
\path[->](4)edge(14);
\node(15)[below right = 7 cm and .05 cm of 2]{$b^1j^2$};
\node(16)[below left = 7 cm and .05 cm of 3]{$j^1b^2$};
\node(17)[left of = 15, node distance = 2.2 cm]{$a^1g^2$};
\node(18)[left of = 17, node distance = 2.2 cm]{$f^1b^2$};
\node(19)[right of = 16, node distance = 2.2 cm]{$g^1a^2$};
\node(20)[right of = 19, node distance = 2.2 cm]{$b^1f^2$};
\path[->, dashed](11)edge(18);
\path[->, dashed](11)edge(19);
\path[->](10)edge(18);
\path[->](10)edge(17);
\path[->, dashed](9)edge(17);
\path[->, dashed](9)edge(19);
\path[->, dashed](5)edge(17);
\path[->, dashed](5)edge(15);
\path[->](7)edge(17);
\path[->](7)edge(16);
\path[->](8)edge(15);
\path[->](8)edge(19);
\path[->, dashed](6)edge(16);
\path[->, dashed](6)edge(19);
\path[->, dashed](12)edge(17);
\path[->, dashed](12)edge(19);
\path[->, dashed](14)edge(17);
\path[->, dashed] (14)edge(20);
\path[->](13)edge(20);
\path[->](13)edge(19);
\end{tikzpicture}}
\end{figure}
\begin{figure}
\caption{The chain complex $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = \frac{1}{2}$, and the chain complex $\widehat{\mathit{CFK}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = -1$. Dashed arrows denote differentials appearing only in the latter.}
\label{1/2 Figure}
\centering
\begin{tikzpicture}[node distance = 2 cm, auto]
\node(1){$k^1k^2$};
\node(2)[below left = 1.5 cm and .5 cm of 1]{$i^1m^2$};
\node(3)[below right = 1.5 cm and .5 cm of 1]{$m^1i^2$};
\node(4)[left of = 2]{$e^1m^2$};
\node(5)[left of = 4]{$c^1c^2$};
\node(6)[right of = 3]{$m^1e^2$};
\node(7)[right of = 6]{$\ell^1\ell^2$};
\path[->](1)edge(2);
\path[->](1)edge(3);
\path[->, dashed](1)edge(5);
\path[->, dashed](1)edge(7);
\node(8)[below = 4.2 cm of 1]{$b^1b^2$};
\node(9)[left of = 8]{$a^1f^2$};
\node(10)[left of = 9]{$f^1a^2$};
\node(11)[right of = 8]{$a^1j^2$};
\node(12)[right of = 11]{$j^1a^2$};
\path[->, dashed](5)edge(8);
\path[->](5)edge(9);
\path[->](5)edge(10);
\path[->, dashed](7)edge(8);
\path[->](7)edge(11);
\path[->](7)edge(12);
\path[->](4)edge(10);
\path[->](4)edge(11);
\path[->](6)edge(9);
\path[->](6)edge(12);
\path[->, dashed](2)edge(9);
\path[->, dashed](2)edge(12);
\path[->, dashed](3)edge(10);
\path[->, dashed](3)edge(11);
\node(13)[below = 4 cm of 2]{$c^1\ell^2$};
\node(14)[below = 4 cm of 3]{$\ell^1c^2$};
\node(15)[below of = 13]{$b^1k^2$};
\node(16)[below of = 14]{$k^1b^2$};
\path[->](13)edge(15);
\path[->](14)edge(16);
\end{tikzpicture}
\end{figure}
\begin{figure}
\caption{The chain complex $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = \frac{3}{2}$, and the chain complex $\widehat{\mathit{CFK}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = 0$. Dashed arrows denote differentials appearing only in the latter.}
\label{3/2 Figure}
\centering
\begin{tikzpicture}[node distance = 2cm, auto]
\node(1){$\ell^1m^2$};
\node(2)[right of = 1] {$m^1c^2$};
\node(3)[right of = 2, node distance = 3 cm] {$c^1m^2$};
\node(4)[right of = 3]{$m^1\ell^2$};
\node(5)[below of = 1]{$a^1b^2$};
\node(6)[below of = 2]{$k^1a^2$};
\node(7)[below of = 3]{$a^1k^2$};
\node(8)[below of = 4]{$b^1a^2$};
\path[->, dashed](1)edge(5);
\path[->](2)edge(6);
\path[->](3)edge(7);
\path[->, dashed](4)edge(8);
\path[->,](1)edge(6);
\path[->, dashed](2)edge(5);
\path[->, dashed](3)edge(8);
\path[->,](4)edge(7);
\end{tikzpicture}
\end{figure}
\begin{figure}
\caption{The chain complex $\widehat{\mathit{CFL}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = \frac{5}{2}$, and the chain complex $\widehat{\mathit{CFK}}(\widetilde{\mathcal D})$ in Alexander grading $A_1 = 1$. Dashed arrows denote differentials appearing only in the latter.}
\label{5/2 Figure}
\centering
\begin{tikzpicture}[node distance = 2 cm, auto]
\node(1){$m^1m^2$};
\node(2)[below of = 1]{$a^1a^2$};
\path[->, dashed](1)edge[bend left](2);
\path[->, dashed](1)edge[bend right](2);
\end{tikzpicture}
\end{figure}
\raggedright
\bibliographystyle{amsplain}
|
{
"timestamp": "2012-06-27T02:04:15",
"yymm": "1206",
"arxiv_id": "1206.5989",
"language": "en",
"url": "https://arxiv.org/abs/1206.5989"
}
|
\section{Introduction}\label{introduction}
The high redshift frontier has moved to $z>7$ as a result of the high
resolution near-infrared (NIR) images from the \emph{Hubble Space
Telescope} (\emph{HST}) Wide Field Camera 3 (WFC3), and the Lyman
break `dropout' technique. The Lyman break technique was first
applied to select Lyman break galaxies (LBGs) at $z\simeq 3$
\citep{guha90,stei96,stei99}, and since then it has been extensively
used to select and study LBG candidates at redshifts $z\simeq 3$--$8$
\citep[e.g.,][]{bouw07,hath08b,redd09,fink10,yan10}. This dropout
technique has generated large samples of faint star-forming galaxy
candidates at $z\simeq 3$--$8$. However, at the highest redshifts ($z>3$),
it is very difficult to understand the details of their stellar
populations using current space and ground-based telescopes. Their
faint magnitudes make it extremely difficult to do spectroscopic
studies, and limited high resolution rest-frame optical photometry
makes it challenging to investigate their spectral energy distributions
(SEDs). These limitations make it imperative to identify and study
LBGs at lower redshifts ($z\lesssim 3$). The primary reason for the
lack of dropout selected LBGs at $z\simeq 1$--$3$\ is that we need highly
sensitive space-based cameras to observe the mid- to near-ultraviolet
(UV) wavelengths required to identify Lyman break at $z\simeq 1$--$3$. The
peak epoch of global star-formation rate at $z\simeq 1$--$3$\ is now
accessible using the dropout technique with the WFC3 UVIS
channel. \citet[][hereafter H10]{hath10} and \citet{oesc10} have used
the \emph{HST} WFC3 with its superior sensitivity to photometrically
identify lower redshift ($z\simeq 1$--$3$) LBGs. Understanding the LBGs at
$z\lesssim 3$ is vital for two main reasons. First, we need to study
the star-formation properties of these LBGs, because they are at
redshifts corresponding to the peak epoch of the global star-formation
rate \citep[e.g.,][]{redd08,redd09,ly09,bouw10}. Second, they are likely lower
redshift counterparts of the high redshift LBGs --- because of their
identical dropout selection and similar physical properties --- whose
understanding will help shed light on the process of reionization in
the early universe \citep[e.g.,][]{labb10,star10}.
There are primarily three techniques to select star-forming galaxies
at $z\simeq 2$: (1) \emph{sBzK} \citep[using the $B$, $z$, $K$
bands,][]{dadd04,dadd07}, (2) BX/BM \citep[using the $U$, $G$, $R$
bands,][]{stei04, adel04}, and (3) LBG \citep[using the bands which
bracket the redshifted Lyman limit, H10;][]{oesc10}. All these
approaches select star-forming galaxies, and yield insight into the
star-forming properties of these galaxies, but they have differing
selection biases, and so these samples don't completely overlap
\citep[see][for details]{ly11,habe12}. Therefore, it is essential to
apply identical selection criteria at all redshifts to properly
compare galaxy samples and accurately trace their evolution. The LBG
selection is widely used to select high redshift ($z>3$) galaxies, and
to do equal comparison with these galaxies, here we investigate
physical properties of LBGs at $z\lesssim 3$.
H10 used UV observations of the WFC3 Science Oversight Committee
Early Release Science extragalactic program (PID: 11359, PI:
O'Connell; hereafter ``ERS''), which covers approximately 50~arcmin$^2$\
in the northern-most part of the Great Observatories Origins Deep
Survey \citep[GOODS;][]{giav04} South field, to identify LBGs at
$z\simeq 1$--$3$. The high sensitivity of the WFC3 UVIS channel data
\citep{wind11}, along with existing deep optical data obtained with
the Advanced Camera for Surveys (ACS) as part of the GOODS program are
ideal to apply dropout technique in observed UV filters to select LBG
candidates at $z\simeq 1$--$3$. In this paper, we use this H10 sample of LBGs to
investigate their physical properties by fitting stellar synthesis
models to their observed SEDs.
This paper is organized as follows: In \secref{data}, we summarize the
WFC3 ERS observations, and discuss our LBG sample at $z\simeq 1$--$3$\ as well
as the comparison sample of LBGs at $z\simeq 4$--$5$. In
\secref{seds}, we fit observed SEDs of LBGs at $z\simeq 1$--$3$\ and $z\simeq 4$--$5$\
to stellar population synthesis models, and discuss
the best-fit parameters (redshift, UV spectral slope, stellar mass,
stellar age, and star-formation rates) obtained from these SED fits.
In \secref{results}, we discuss correlations between best-fit physical
parameters and their implications on our understanding of LBGs.
In \secref{conclusion}, we conclude with a summary of our
results.
In the remaining sections of this paper we refer to the
\emph{HST}/WFC3 F225W, F275W, F336W, F098M, F125W, F160W, filters as
$U_{\rm 225}$, $U_{\rm 275}$, $U_{\rm 336}$, $Y_{\rm 098}$, $J_{\rm 125}$, $H_{\rm 160}$, to the \emph{HST}/ACS
F435W, F606W, F775W, F850LP filters as $B_{\rm 435}$, $V_{\rm 606}$, $i_{\rm 775}$, $z_{\rm 850}$, and
to the \emph{Spitzer}/IRAC 3.6~$\mu$m, 4.5~$\mu$m filters as [3.6],
[4.5], respectively, for convenience. We assume a \emph{Wilkinson
Microwave Anisotropy Probe} (WMAP) cosmology with $\Omega_m$=0.274,
$\Omega_{\Lambda}$=0.726 and $H_{\rm 0}$=70.5~km s$^{-1}$ Mpc$^{-1}$, in
accord with the 5 year WMAP estimates of \citet{koma09}. This
corresponds to a look-back time of 10.4~Gyr at $z\simeq 2$.
Magnitudes are given in the AB$_{\nu}$ system \citep{oke83}.
\section{Observations and Sample Selection}\label{data}
The WFC3 ERS observations \citep{wind11} were done in both the UVIS
(with a FOV of 7.30 arcmin$^2$) and the IR (with a FOV of 4.65 arcmin$^2$)
channels. Here, we briefly summarize the UV imaging observations. The
WFC3 ERS UV observations were carried out in three broad-band filters
$U_{\rm 225}$, $U_{\rm 275}$\ and $U_{\rm 336}$. The $U_{\rm 225}$\ and $U_{\rm 275}$\ filters were
observed for 2 orbits ($\sim$5688s) per pointing, while the $U_{\rm 336}$\
filter was observed for 1 orbit ($\sim$2778 s) per pointing, for a
total of 40 orbits over the full ERS field (8 pointings). We used the existing
GOODS v2.0\footnote{http://archive.stsci.edu/pub/hlsp/goods/v2/}
reduction of the ACS images in four optical bands ($B_{\rm 435}$, $V_{\rm 606}$,
$i_{\rm 775}$, $z_{\rm 850}$), which were re-binned to a pixel size of 0.09\arcsec.
To match the ERS IR ($Y_{\rm 098}$, $J_{\rm 125}$, $H_{\rm 160}$) and re-pixellated ACS
optical images, the UV mosaics have a pixel scale of
0.090\arcsec~pix$^{-1}$ and cover $\sim$50~arcmin$^2$\ area of the
GOODS-South field. Details of these observations and reduction
process are described in \citet{wind11}.
The combination of the three WFC3 UV filters and the four ACS optical
filters provide an excellent ability to select LBGs at $z\simeq 1$--$3$\
\citep[H10;][]{oesc10}, using the dropout technique to detect the
Lyman-break at rest-frame 912~\AA\ \citep{mada95}. H10 used dropout
color selection technique in the ERS UV field to identify three sets
of UV-dropouts --- $U_{\rm 225}$-dropouts, $U_{\rm 275}$-dropouts and
$U_{\rm 336}$-dropouts --- which are LBG candidates at $z\simeq 1.6$,
$2.2$ and $2.6$, respectively. They found 66 $U_{\rm 225}$-, 151 $U_{\rm 275}$-
and 256 $U_{\rm 336}$-dropouts to a magnitude limit of AB$\,\simeq\,$26.5~mag,
respectively.
In this paper, we start with the H10 LBG sample. Our goal is to
investigate SEDs of reliable LBG candidates with at least 10-band HST
coverage (augmented by additional data as described in \secref{seds})
from the H10 sample, so we apply the following filtering
criteria. First criterion is the availability of the WFC3 IR ($Y_{\rm 098}$,
$J_{\rm 125}$, $H_{\rm 160}$) data. H10 used the WFC3 UVIS and ACS data to select LBGs
at $z\simeq 1$--$3$. The WFC3 UVIS channel has a larger final ERS mosaic than
the WFC3 IR channel, so we exclude LBG candidates that don't have WFC3
IR data from our SED analysis. This criterion reduces the H10 sample
size by about 10\%. Secondly, galaxies with poor SED fits (see
\secref{seds}) as measured by their larger $\chi^2$ were excluded from
the sample. The galaxies which fail the SED fit are usually fainter,
do not have all NIR photometric data or have highly uncertain NIR
photometry, have a poor best-fit SED (indicated by a high $\chi^2$), and
could have a primary lower-redshift ($z<1$) solution. This criterion
removes additional $\sim$10\% of galaxies from the H10 sample. This
fraction of catastrophic $\chi^2$ outliers is consistent with the
outlier fraction in the photometric redshift distribution of the
dropouts in the H10 sample. The final sample of LBGs for the SED
analysis --- after applying the above-mentioned criteria --- is 47
$U_{\rm 225}$-, 126 $U_{\rm 275}$- and 213 $U_{\rm 336}$-dropouts.
To compare SED properties of LBGs at $z\simeq 1$--$3$, we select $B_{\rm 435}$- and
$V_{\rm 606}$-dropouts in the WFC3 ERS field. These dropouts --- LBG
candidates at $z\simeq 3.7$ and $4.7$, respectively --- were selected
following the \citet{bouw07} selection criteria. The goal of this
paper is to compare the HST/WFC3 selected UV-dropout galaxies at
$z\simeq 1$--$3$\ with similar galaxies at higher redshifts ($z\sim
4$--$5$). The limited area and depth of the ERS data puts brightness
limitations on our UV-dropout selection, which was confined to
comparatively brighter part of the rest-UV luminosity function (around the
knee and brighter as shown in H10). Therefore, we have
selected $B_{\rm 435}$-dropouts and $V_{\rm 606}$-dropouts in the ERS field whose
luminosities are similar to the UV-dropouts. The luminosity range is
$0.1L^*\lesssim L \lesssim 2.5L^*$ (based on $L^*$ corresponding to
M=--21 mag). Applying similar filtering criteria as LBGs at $z\simeq 1$--$3$,
we have a comparison sample of 155 $B_{\rm 435}$-dropouts and 27
$V_{\rm 606}$-dropouts in the ERS field. Based on \citet{xue11} X-ray
catalog, there are four active galactic nuclei (AGN) in the H10 LBG
sample ($z\lesssim 3$), three AGN in the $B_{\rm 435}$-dropout sample, and
none in the $V_{\rm 606}$-dropout sample. This small number of X-ray AGN does
not affect our results or conclusions.
All subsequent analysis in this paper is done identically on these 5
samples ($U_{\rm 225}$-, $U_{\rm 275}$-, $U_{\rm 336}$-, $B_{\rm 435}$-, and $V_{\rm 606}$-dropouts) for
proper comparison. To show general evolutionary trends, we combine
three dropout samples from H10 as a UV-dropout sample ($z\simeq 1$--$3$), and
two high redshift samples as a $B_{\rm 435}$-,$V_{\rm 606}$-dropout sample ($z\simeq 4$--$5$).
\section{Spectral Energy Distributions}\label{seds}
The \texttt{Le PHARE} software package \citep{arno99, ilbe06} was used
to measure the photometric redshifts, and to fit the broadband SEDs of
LBGs. The primary goal of SED fitting is to find the best-fitting
synthetic stellar population model to the observed photometry. From
this best-fit model, we can estimate the redshift, stellar age,
stellar mass, star-formation rate (SFR), dust extinction, and other
physical properties of each galaxy. We use the 2007 version of the
\citet[hereafter CB07]{bruz03} models, which have an improved prescription
of thermally pulsating AGB stars. We generated a set of stellar
population models assuming a Salpeter initial mass function, and
varying the redshift ($z=0.1$--6.0, $\delta$z=0.1, though a parabolic
interpolation is used to refine the photometric redshift solution
within $\delta$z intervals), metallicity (0.2,
0.4 and 1 $Z_{\odot}$), age (1 Myr $\leq t \leq$ $t_{H}$), dust
extinction (0 $\leq$ E(B-V) $\leq$ 0.7~mag, using a modified
\citealt{calz00} attenuation law), and $e$-folding timescale
($\tau$=0.1,0.3,1,2,3,5,10,15,30~Gyr) for a star-formation history
(SFH)$\,\propto\,$exp(-t/$\tau$). The \texttt{Le PHARE} code assumes
the \citet{mada95} prescription to estimate inter-galactic medium
(IGM) opacity. The model that gives the lowest $\chi^2$ is chosen as
the best-fit SED.
The contribution of major emission lines in different filters can be
included in the models using the \texttt{Le PHARE} code. Neglecting
emission lines during the SED fitting process can overestimate the
best-fit stellar ages and masses by as much as 0.3 dex
\citep[e.g.,][]{scha09,fink11,atek11}. The \texttt{Le PHARE} code
accounts for the contribution of emission lines with a simple recipe
based on the \citet{kenn98} relations between the SFR and UV
luminosity, H$\alpha$ and [OII] lines. The code includes the
Ly$\alpha$, H$\alpha$, H$\beta$, [OII], OIII[4959] and OIII[5007]
lines with different line ratios with respect to [OII] line, as
described in \citet{ilbe09}.
The observed photometry is available in up to 13 filters: three
\emph{HST}/WFC3 UVIS, four \emph{HST}/ACS, three \emph{HST}/WFC3 IR,
one VLT Ks, and two \emph{Spitzer}/IRAC [3.6], [4.5] bands. We perform
matched aperture photometry in 10 \emph{HST} bands as discussed in
H10, while we use VLT and \emph{Spitzer} photometry from the publicly
available GOODS-MUSIC catalog \citep{sant09}. The photometry in MUSIC
catalog has accurate PSF-matching of space and ground-based images of
different resolution and depth. \figref{fig:seds} shows example
best-fit SEDs for LBGs at $z\simeq 1$--$3$, and the comparison sample of LBGs
at $z\simeq 4$--$5$.
\subsection{Photometric Redshifts}\label{photz}
One of the free parameters during the SED fitting process is the
redshift. To assess the accuracy of our SED-based photometric
redshifts ($z_{ph}$) at $z\simeq 1$--$3$, we compare them with the
spectroscopic redshifts ($z_{sp}$) from various VLT/Magellan campaigns
in the GOODS-S field
\citep[e.g.,][]{graz06,ravi07,vanz08,wuyt08,bale10,coop12}. We find
that only a small fraction ($\lesssim\,$30\%) of the H10 dropout sample has
spectroscopic redshifts, most likely due to the lack of strong
features in observed 4500--9000~\AA\ range at $1 \lesssim z \lesssim
3$, where most ground-based spectrographs on large telescopes are
optimized. We matched 91 spectroscopic redshifts for the UV-dropout
sample ($z\simeq 1$--$3$) selected with the criteria discussed in
\secref{data}. \figref{fig:redshifts} shows the comparison between the
SED based photometric redshifts and the publicly available
spectroscopic redshifts. The catastrophic outliers --- shown by
concentric circles in \figref{fig:redshifts} --- have quality flags
that indicate the spectroscopic redshift is unreliable (in most
catalogs, C or worse). So it is likely that these spectroscopic
redshifts are not correct and hence, redshift comparison for these
objects is not credible. The histogram in \figref{fig:redshifts}
shows the distribution of photometric redshift uncertainties $\delta
z = (z_{sp} - z_{ph})/(1+z_{sp})$. Based on this distribution, we
estimate $\sigma(\delta z) \simeq 0.05$, and $\langle\delta
z\rangle = -0.03$. The fraction of catastrophic outliers ($>3\sigma$) is
$\sim$7\%, excluding objects with unreliable spectroscopic redshifts.
Our photometric redshift uncertainties are consistent with
\citet{dahl10}, who used the deepest and the most comprehensive
photometric data in the GOODS-S field. \citet{habe12} selected fairly
bright LBGs at $z\simeq 2$ using the \emph{GALEX} data, and found
similar photometric redshift uncertainties and outlier fraction for
their dropout sample. The photometric redshift uncertainty in the
implied redshift is also consistent with the dropout selection method
applied to select these galaxies. The dropout selection technique uses
the location of a spectral break within a photometric bandpass
(filter), and therefore, the redshift uncertainty depends on the width
of the bandpass, and could be as high as $\sim$0.5 in $z$.
The distribution of photometric redshift uncertainties ($\delta z$) is
asymmetric, even after excluding objects with unreliable spectroscopic
redshifts. There are more galaxies in the distribution with
spectroscopic redshifts lower than their photometric redshifts i.e.,
($z_{sp} - z_{ph} < 0$). Detailed investigation of each ground-based
spectrum (if available) is needed to figure out what is causing the
spectroscopic redshift to be lower than the photometric redshift. Such an
investigation is beyond the scope of this paper, but we should point
out that such asymmetric distribution is also observed for
\emph{GALEX}--selected LBGs \citep[e.g.,][]{habe12}, and is totally
consistent within the estimated photometric redshift uncertainties.
\subsection{UV Spectral Slope $\beta$}\label{beta}
The UV spectral slope $\beta$ is determined from a power-law fit to
the UV continuum spectrum \citep{calz94}, $f_{\lambda} \varpropto
\lambda^{\beta}$, where $f_{\lambda}$ is the flux density per unit
wavelength (ergs s$^{-1}$ cm$^{-2}$ \AA$^{-1}$). We use the best-fit
SEDs of dropout selected LBGs to estimate their UV spectral slope
$\beta$ by fitting a straight line between rest-frame 1300 and
1900~\AA\ in their model spectrum. This wavelength range covers 7 out
of 10 spectral fitting windows identified by \citet{calz94} to
estimate the UV spectral slope. This wavelength range is also ideal
for comparing $\beta$ values at higher redshifts, because those are
usually measured between rest-frame 1600 and
2000~\AA. \figref{fig:beta_fit} shows the slope-fitting method applied
to the best-fit SEDs to estimate $\beta$, where the solid line is the
best-fit UV spectral slope, the dashed line is the best-fit SED, and
the black filled circles are observed magnitudes. By selection
(\secref{data}), we only consider galaxies with good SED fits so the
choice of model should not affect the $\beta$ estimate. Though
uncertainties in observed photometry could affect the best-fit SED
parameters, and hence, the $\beta$ estimate. In \figref{fig:beta_fit},
we also quote the typical intrinsic uncertainty in $\beta$ for galaxies
at different redshifts. We estimate $\beta$ for each galaxy, and then
fit a Gaussian to the $\beta$ distribution to find median (and sigma)
value in each redshift bin. \tabref{tab:beta} shows median $\beta$
values and their corresponding uncertainties for the UV-dropout, and
the $B_{\rm 435}$-,$V_{\rm 606}$-dropout samples.
The evolution in the UV spectral slope $\beta$ as a function of
redshift may indicate change in stellar populations of galaxies over
cosmic time. We compare our $\beta$ values with the higher redshift
measurements from the literature \citep[e.g.,][]{bouw12,
fink12}. \figref{fig:beta_z} shows the UV spectral slope $\beta$ as
a function of redshift. Blue filled squares are median $\beta$ values
measured between rest-frame 1300 and 1900~\AA\ for our dropout
samples. To test how $\beta$ measurements are affected by the
selection of rest-frame UV wavelength range, we also measured $\beta$
between rest-frame 1300 and 3400~\AA, which are shown by blue open
squares in \figref{fig:beta_z}. Both $\beta$ values agree within
1$\sigma$ uncertainties. The red filled diamonds are measurements
from \citet{bouw09,bouw12} and purple filled circles are from
\citet{fink12}. The $\beta$ values from \citet{bouw09,bouw12} are for
the galaxies around $M_{\rm uv}^*$, which is consistent with our
sample, while \citet{fink12} measurements are based on all galaxies
extending to those fainter than $M_{\rm uv}^*$ in their respective
redshift bins. The uncertainties on the median values of $\beta$ are
the standard error of the mean in the case of \citet{fink12} and our
measurements, while \citet{bouw09,bouw12} uncertainties represent
1$\sigma$ scatter. For comparison, our estimated 1$\sigma$ scatter in
median $\beta$ values are listed in \tabref{tab:beta}.
\figref{fig:beta_z} shows that the median values of $\beta$ decrease
as redshift increases ($\beta\simeq\,$--1.6 at $z\simeq 1.6$ to
$\beta\simeq\,$--2.4 at $z\simeq 8$), which could imply variations in
one or more physical properties of LBGs as a function of redshift.
\figref{fig:beta_all} shows evolution in $\beta$ as a function of
best-fit SED parameters (stellar mass, stellar age, dust content, SFR)
and redshift. The lowest redshift bin ($z\simeq 1.6$) is shown by the
smallest circles, and the highest redshift bin ($z\simeq 4$--$5$) is
shown by the largest circles. The largest change (factor of $\sim$2 or
0.3 dex) is seen in the dust content E(B--V) of galaxies as
$\beta$ changes from --1.6 (at $z\simeq 1.6$) to --1.9 (at $z\simeq
4$--$5$), while other three parameters vary much less than a factor of
2. This could imply that change in the dust content of LBGs has
largest effect on the UV spectral slope $\beta$, and any variation in
$\beta$ as a function of redshift could most likely be due to changing
dust content of galaxies. Therefore, based on our $\beta$ estimates,
as shown in \figref{fig:beta_z}, we could say that LBGs at lower
redshift ($z\simeq 1.6$) have more dust than LBGs at higher redshift
($z\simeq 5$). This trend of $\beta$ is consistent with
previous studies, which argued that galaxies at $z\simeq 6$ tend
to be bluer than those at $z\simeq 3$
\citep[e.g.,][]{stan05,bouw06,hath08a,wilk11}. Those $\beta$
measurements, on uniformly selected LBGs, were limited to LBGs at
$z\gtrsim 3$, and our results below $z\simeq 3$ extend this observed
trend to $z\simeq 1.5$.
The evolution in the UV spectral slope $\beta$ could also be due to
changing star-formation history, initial mass function (IMF), and/or
metallicity. These effects are believed to be much smaller than the
effects from changing dust content of the galaxy. Many authors have
investigated various stellar population models to estimate these
effects. \citet{bouw12} explored sensitivity of the UV-continuum slope
$\beta$ to changes in the mean metallicity, age, or dust extinction by
choosing one fiducial model as a benchmark, and then changing various
model parameters to assess changes in $\beta$. They conclude that a
factor of 2 (or 0.3 dex) changes in metallicity, age or dust content
result in 0.07, 0.15, 0.35 changes in the UV spectral slope $\beta$,
respectively. This implies that changes in the dust content have much
larger effect on the UV-continuum slope than similarly-sized changes
in the age, metallicity, or the stellar IMF. Similar studies
\citep[e.g.,][]{leit99,hath08a, wilk11} have come to the same
conclusion that though $\beta$ could be affected by various stellar
population properties, the change in dust content of galaxies is the
predominant effect which causes $\beta$ to change. We should also
emphasize that it is very challenging to completely understand these
various effects based on observations only, rigorous modeling and/or
simulations are required to fully assess the contributions of these
various effects on the UV spectral slope $\beta$.
\subsection{Stellar Population Properties}\label{age_mass_sfr}
We compare observed SED with a suite of model templates from CB07 to
find the best-fit model through $\chi^2$ minimization. All SED
parameters are fit simultaneously. The best-fit
model allows us to estimate photometric redshift (as shown in
\secref{photz}), and physical properties of stellar populations such
as stellar age, stellar mass, dust extinction E(B--V) and SFRs for
each galaxy. The estimated uncertainties ($\sim$0.3--0.4 dex) in
stellar ages, masses and SFRs are estimated by marginalizing the
uncertainties in observed photometry and redshift.
One of the main limitations of SED fitting is the need to assume a
SFH, which cannot be reliably constrained for a galaxy from limited
photometric data points. We have assumed an exponentially declining
SFH. Different SFHs (e.g., rising, constant, declining) introduce
systematic uncertainties in the stellar mass determinations, mostly at
redshift greater than $z\simeq 3$ \citep[e.g.,][]{lee10,papo11}.
These uncertainties are typically $\lesssim$0.3 dex
\citep[e.g.,][]{finl07}, and are within our estimated uncertainties.
Stellar ages are highly sensitive to the assumed SFH. Any prior
star-forming episode can be overshadowed by newly born stars from the
most recent star-formation, totally neglecting possible existence of
older population in a given galaxy. Therefore, based on assumed
histories, it is possible to get an older or a younger age for the
same galaxy. Hence, interpreting the stellar ages derived from SED
fitting can be tricky, and at the very least, the uncertainties on the
stellar ages could be much larger than estimated uncertainties
($\sim$0.3--0.4 dex). Because of these issues, in subsequent
analysis, we will not elaborate on stellar population ages and focus
on other physical properties.
\figref{fig:sed_hist} shows the distributions of stellar age, stellar
mass, SFRs, and E(B--V) for LBGs at $z\simeq 1$--$3$\ (black), and the
comparison sample of LBGs at $z\simeq 4$--$5$\ (red). The median values are
shown by dashed vertical lines and 1$\sigma$ uncertainties in these
distributions for LBGs at $z\simeq 1$--$3$\ are shown by an error bar at the
top of the black histogram. A two-sided K-S test --- in each panel
--- indicates a probability \emph{less} than 0.006 that the
distributions (red and black histograms) are drawn from the
\emph{same} parent distribution. \figref{fig:sed_hist} shows a
general trend that --- on average --- higher redshift LBGs have low
SFRs, less dust, and are less massive than their lower redshift
counterparts, though median values of two distributions (red and
black) are similar within 1$\sigma$ uncertainties. This result is in
good agreement with previous studies comparing LBGs at $z\simeq 3$ and
$z\simeq 5$ \citep[e.g.,][]{verm07}. The average E(B--V) at $z\simeq
2$ is consistent with studies based on star-forming galaxies selected
using BX/BM color technique \citep[e.g.,][]{erb06, sawi12}. The
distribution of E(B--V) completely agrees with the UV spectral slope
evolution as discussed in \secref{beta}, implying that the LBGs at
$z\simeq 1$--$3$\ are more dusty (redder) compared to LBGs at $z\simeq 4$--$5$.
\section{Results and Discussion}\label{results}
\subsection{Stellar Mass vs UV Luminosity Relation}\label{mass_lum}
The rest-frame UV light traces recent or instantaneous SFR, while
rest-frame optical and NIR data help us to estimate stellar masses of
galaxies. If the galaxy stellar mass and UV luminosity are related
then we can directly use rest-frame UV light to estimate stellar mass
without needing rest-frame optical/NIR data. \figref{fig:mass_abs}
shows stellar mass of LBGs at $z\simeq 1.5$--$5$ as a function of
their UV absolute magnitude. These quantities are based on best-fit
SEDs, and their typical uncertainties are shown in the lower-left
corner. The dotted lines show the best-fit line obtained by keeping the
logarithmic slope fixed at 0.46, which was estimated by \citet{sawi12}
for star-forming galaxies at $z\simeq 2$. The dot-dash lines show the
scatter from the best-fit line, which is $\sim$0.3 dex for LBGs at
$z\simeq 1$--$3$\ and about 0.2 dex for LBGs at $z\simeq 4$--$5$. We also tested the
validity of this relation by fitting the slope of the line rather than
fixing it. We find that the fitted slope is in the range of
0.42$\pm$0.06 for our LBG samples, which is consistent with 0.46
within the estimated 1$\sigma$ scatter in this relation. Therefore, we
find that a proportionality relation between these two parameters with
a logarithmic slope of 0.46 provides a good fit to the data. The
stellar masses of the brighter LBGs --- with UV luminosities near the
$L_{\rm uv}^*$ value of LBGs at $z\simeq 3$ from \citet{stei99} ---
are about a factor of 2 lower than 10$^{10}$~M$_{\odot}$ estimated by
\citet{papo01}. This discrepancy, though within our estimated
uncertainties, could be due to the fact that we include emission lines
in our SED fitting which could affect stellar masses by as much as a
factor of $\sim$2. The stellar mass--UV luminosity relation is fairly
tight with a small scatter ($\lesssim\,$0.3 dex), which is consistent
with other studies at similar redshifts \citep[e.g.,][]{papo01}, and
it points to a nearly constant mass-to-light ratio
(log(M/L)$\,\simeq\,$--0.5) for LBGs between $z\simeq 1.5$ and $5$.
The tightness/lower scatter of the stellar mass--UV luminosity
relation in \figref{fig:mass_abs} could be --- in part --- due to the
fact that both these quantities are output parameters from the
best-fit SEDs, and therefore, it is possible that these parameters are
not totally independent. We have addressed this issue and discussed
its implications in \secref{discuss}. A similar correlation between
stellar mass and absolute magnitude has been reported for LBGs at
$z\simeq 5$--$6$ by \citet{star09}.
\figref{fig:mass_abs} shows that LBGs at $z\simeq 1.5$--$5$ follow
similar linear correlation between stellar mass and UV absolute
magnitude (within uncertainties) for $M_{\rm uv}$ between --19 and
--22.5~mag. It is important to note that \citet{shap05} does not find
any correlation between the stellar mass and UV absolute magnitude for
star-forming galaxies at $z\simeq 2$ with stellar masses
$\gtrsim\,$10$^{10}$~M$_{\odot}$. This could be due to different
color-selection technique (BX/BM) used by \citet{shap05} to select
star-forming galaxies, whose physical properties could differ from the
dropout selected LBGs at these masses \citep[e.g.,][]{ly11,habe12}. It
is also possible that their sample --- which consists of
spectroscopically confirmed bright galaxies with stellar masses
greater than or equal to 10$^{10}$~M$_{\odot}$ --- has more massive
galaxies than our sample and it is uncertain how massive galaxies
would follow this correlation. The ERS observations are too limited
in area and depth to cover a larger luminosity range, so we cannot
predict how this relation will evolve for luminous ($M_{\rm uv}\!
<\,$--22.5~mag) or dwarf ($M_{\rm uv}\!>\,$--19~mag) galaxies at these
redshifts.
\subsection{SFR vs Stellar Mass}\label{sfr_mass}
The correlation between the current SFR and stellar mass in
star-forming galaxies, also known as `main sequence of star-formation'
(MS), has been observed at $z\lesssim 2$
\citep[e.g.,][]{noes07,elba07,dadd07}. These studies have shown that
the MS relation does not seem to evolve strongly with redshift, but
the zeropoint does: that is, high-redshift ($z\simeq 2$) star-forming
galaxies are forming stars at a higher rate than similar mass local
galaxies. In \figref{fig:mass_sfr}, we investigate this relation and
star-formation histories for LBGs at $z\simeq 1.5$--$5$. These
quantities are based on best-fit SEDs, and their typical uncertainties
are shown in the lower-right corner. The dotted lines show the best-fit
line obtained by keeping the logarithmic slope fixed at 0.90,
estimated for star-forming galaxies at $z\le 2$
\citep[e.g.,][]{elba07,dadd07,sawi12}. The dot-dash lines show the
scatter from the best-fit line, which is $\sim$0.6 dex for LBGs at
$z\simeq 1$--$3$\ and about 0.4 dex for LBGs at $z\simeq 4$--$5$. We also obtained the
best-fit logarithmic slope for this relation, and found the fitted
slope in the range of 0.81$\pm$0.30 for our LBG samples, which is
consistent with 0.90 within the estimated 1$\sigma$ scatter in this
relation. We find that a proportionality with a logarithmic slope of
0.90 provides a good fit to the data with few outliers at stellar mass
greater than 10$^{10}$~M$_{\odot}$.
\citet{finl06} have shown that a tight relation exists between SFR and
stellar mass for galaxies at $z\simeq 4$ using the cosmological
hydrodynamic simulations, which is also consistent with the
observations \citep{bouw12}. \citet{finl06} also point out that the
scatter in the \figref{fig:mass_sfr} could be a measure of SFR
`burstiness' as a function of stellar mass. This means that the linear
relation (with a logarithmic slope of $\sim$0.90) indicates an average
SFR for a given stellar mass, but galaxies can also experience bursts
of up to two times the average SFR value at the same stellar mass as
shown by the scatter. The scatter in the SFR versus stellar mass
relation for LBGs at $z\simeq 1$--$3$\ is slightly larger than $\sim$0.3 dex
--- observed at $z\simeq 2$ by \citet{dadd07} --- possibly because of
few galaxies forming a sharp edge towards high SFR values, as seen in
the relation for $U_{\rm 225}$- and $U_{\rm 275}$-dropouts (upper panel in
\figref{fig:mass_sfr}). These galaxies have low stellar ages (less
than 10~Myr), which could be highly uncertain as discussed in
\secref{age_mass_sfr}. It is also possible that this edge
could be an artifact due to lower limits on the model parameters $\tau$
and \emph{t} \citep[e.g.,][]{hain12}. We also note that
\citet{mclu11} argue that the tightness in the SFR-stellar mass
relation depends on the assumed SFH. The scatter in this relation is
much less for a constant SFH, while it is much larger for other
SFHs. Therefore, it is also likely that the larger scatter we see in
\figref{fig:mass_sfr} could be due to different SFHs.
\figref{fig:mass_sfr} shows that, though our data has a little more
scatter compared to the MS relation at $z\lesssim 2$, the majority of
our galaxies fall on to this relation characterized by a logarithmic
slope of 0.90. A similar correlation is observed at $z\simeq 6$--$8$
by \citet{mclu11}, and supported by cosmological hydrodynamic
simulations of \citet{finl11}. Our observations confirm this MS
relation for star-forming galaxies from $z\simeq 1.5$ to $5$, implying
that --- on average --- their star-formation histories are similar.
\subsection{Implications}\label{discuss}
In previous sections, we have shown that LBGs at $z\simeq 1$--$3$\ --- on
average --- are massive, dustier, and have higher star-formation rates
than LBGs at $z\simeq 4$--$5$\ with similar luminosities, though it should
also be noted that they are not very different within estimated
1$\sigma$ uncertainties. As pointed out by \citet{papo11}, the number
densities of galaxies at fixed luminosity could change substantially
over this redshift range, which could lead to potential biases when
comparing galaxies at different redshifts. However, the general
trends we observe in stellar masses, SFRs, and dust extinction are
supported by other independent means. The characteristic UV
luminosity ($L_{\rm uv}^*$) is increasing as a function of redshift
from $z\sim 8$ to $2$ (e.g., H10), which implies an increase in SFRs with
time, while \citet{fink10} have shown that stellar masses for $M_{\rm
uv}^*$ LBGs grow from $z\simeq 8$ to $2$. The UV spectral slope
$\beta$ shows evolution as a function of redshift
(\figref{fig:beta_z}), which could indicate lower dust content at
higher redshifts. The higher dust content in LBGs at lower redshift
is also in accordance with the studies at $z\simeq 1$
\citep[e.g.,][]{burg07,basu11}, while \citet{verm07} supports the
lower dust content in LBGs at $z\simeq 5$. Therefore, the ensemble
properties of LBGs in our sample are in general agreement with the
expected results.
The stellar mass--UV luminosity relation (\figref{fig:mass_abs}) and
the SFR--stellar mass relation (\figref{fig:mass_sfr}) are based on
measurements from best-fit SEDs, therefore, it is possible that these
quantities are not totally independent, which might affect their
observed correlations. To investigate this, we show distributions of
mass-to-light (M/L; Mass/$L_{\rm uv}$) ratios and specific SFRs (SSFR;
SFR/Mass) in \figref{fig:ssfr}. The black (red) histograms show
distribution for LBGs at $z\simeq 1$--$3$\ ($z\simeq 4$--$5$), and the median values
are shown by dashed vertical lines. The median values of M/L ratio and
SSFR for LBGs at $z\simeq 4$--$5$\ are slightly lower than that at $z\simeq 1$--$3$,
but are still consistent within the 1$\sigma$ uncertainties as shown
by the error bar on the top of the black histogram. A two-sided K-S
test --- in each panel --- indicates a probability \emph{less} than
0.05 that the distributions (red and black histograms) are drawn from
the \emph{same} parent distribution. The constancy of the M/L ratio
and SSFR between $z\simeq 1.5$ and $5$ agrees very well with the
constant slope we find in \figref{fig:mass_abs} and
\figref{fig:mass_sfr} for our sample of LBGs, though with a slightly
larger scatter.
Stellar masses of LBGs at $z\simeq 1$--$3$\ are generally well correlated with
UV absolute magnitude and current SFR, as expected for star-forming
galaxies at similar redshifts
\citep[e.g.,][]{elba07,dadd07,sawi12}. These correlations imply very
similar mass assembly and SFH for these galaxies, but the exact nature
of SFHs is still not clearly understood. \citet{papo11} showed that
the cosmologically averaged SFRs of star-forming galaxies at $3 < z <
8$ --- at constant co-moving number density --- increase smoothly from
$z=8$ to $3$, and the stellar mass growth in these galaxies is
consistent with this derived SFH. The scenario of rising SFH
\citep[see also][]{lee10} is also supported by recent results from the
cosmological hydrodynamic simulations \citep[e.g.,][]{finl11}. The
models with rising SFHs conflict with the assumption that the SFR in
distant galaxies is either constant or decreasing exponentially with
time \citep[e.g.,][]{papo05,shap05,labb10}. Though, we remind the
reader that the models with rising SFHs advocated by \citet{papo11}
and others correspond to cosmologically averaged SFHs for typical
galaxies, and not individual galaxies, because they could involve
random events that change their instantaneous SFR. \citet{papo11} and
\citet{lee10} also argue that rising SFHs are most beneficial to
higher redshift ($z\gtrsim 3$) galaxies. We find that for our assumed
SED model parameters, the LBGs between redshift $z\simeq 1.5$ and $5$
--- on average --- have similar SFHs, though the precise nature of
SFHs at all redshift is still under debate, and could also affect the
SFR--stellar mass correlation.
Our analysis demonstrates that the dropout selected galaxies at
$z\simeq 1$--$3$\ --- within luminosities probed here --- show similar
correlations between physical parameters (SFR, stellar mass, UV
luminosity) as other star-forming galaxies selected using different
color criteria (e.g., $sBzK$, BX/BM) at $z\simeq 2$. This is
consistent with the \citet{ly11} conclusion that majority
($\sim$80--90\%) of the dropout selected galaxies overlap with other
color selected star-forming galaxies with stellar masses less than
10$^{10}$~M$_{\odot}$. The stellar mass range for our current sample
is between $\sim$10$^{8}$ and $\sim$10$^{10}$ M$_{\odot}$, with sample
completeness around 10$^{9-9.5}$ M$_{\odot}$. Significant differences
between the dropout selected sample and other color selected samples
of star-forming galaxies at $z\simeq 2$ exist for massive galaxies
($\gtrsim\,$10$^{10}$ M$_{\odot}$; \citealt{ly11}). Therefore, it is
vital to use a uniform selection technique at all redshifts to avoid any
selection biases. The Lyman break dropout technique is the most
convenient and widely used method to select galaxies at $z\gtrsim 3$,
and we have shown that LBGs at $z\simeq 1$--$3$\ selected using this dropout
technique have similar physical properties (within uncertainties) as
LBGs at $z\simeq 4$--$5$\ with similar luminosities. Hence, LBG selection at
$z<3$ is important to understand properties of LBGs and
properly investigate their evolution as a function of redshift. The
validity of LBG properties over wide luminosity and mass range can be
investigated in detail with the upcoming and future WFC3 UV surveys
such as CANDELS \citep{grog11,koek11} and the WFC3 UV UDF \citep[PI:
H. Teplitz]{rafe12}.
\section{Summary}\label{conclusion}
In this paper, we investigated stellar populations of LBGs at
$z\simeq 1$--$3$\ selected using \emph{HST}/WFC3 UVIS filters in the GOODS-S
field. We used deep multi-wavelength observations from the \emph{HST}, VLT,
and \emph{Spitzer} to compare observed SEDs with the spectral synthesis
models to infer physical properties (stellar masses, stellar ages,
SFRs, and dust extinction) of these LBGs. We also compared these LBGs
with their higher redshift ($z\simeq 4$--$5$) counterparts with similar
luminosities ($0.1L^*\lesssim L \lesssim 2.5L^*$). Our results can be
summarized as follows:
$\bullet$ We obtain reliable
($\sigma((z_{sp}-z_{ph})/(1+z_{sp}))\simeq 0.05$) photometric redshifts
for dropout selected LBGs at $z\simeq 1$--$3$\ based on 10--13 band SEDs.
$\bullet$ The UV continuum slope $\beta$ for LBGs at $z\simeq 1$--$3$\ is
redder ($\beta\simeq\,$--1.6 at $z\simeq 1.6$) compared to their
higher redshift counterparts ($\beta\simeq\,$--2.4 at $z\simeq 8$),
implying higher dust content in these LBGs.
$\bullet$ On average, LBGs at $z\simeq 1$--$3$\ are massive, dustier and more
highly star-forming compared to LBGs at $z\simeq 4$--$5$, though their median
values are very similar within estimated 1$\sigma$ uncertainties.
This similarity emphasizes the importance of identical Lyman break
selection technique at all redshifts, which selects physically similar
galaxies.
$\bullet$ The stellar mass--absolute UV magnitude relation for LBGs
between $z\simeq 1.5$ and $5$ shows a linear correlation with a
logarithmic slope of $\sim$0.46, while the SFR--stellar mass relation
shows a similar correlation with a logarithmic slope of $\sim$0.90. To
properly compare and interpret such relations at higher ($z>3$)
redshift, and to avoid any selection biases due to different selection
techniques, a true Lyman break selection is required at $z\simeq 2$.
$\bullet$ We need larger \emph{HST} UV surveys to cover full range in
luminosity/mass and better understand LBG properties, and their
evolution. Both deeper and wider UV surveys are needed. The wider one
to probe the high mass end, while the deeper one will probe the
sub-$L^*$ population. A large number of \emph{HST} orbits have been used
for dropout selected galaxies at $z>3$, and the lower redshift regime
needs to be explored in a comparable manner.
\acknowledgments
We thank the referee for helpful comments and suggestions that
significantly improved this paper.
This paper is based on Early Release Science observations made by the
WFC3 Scientific Oversight Committee. We are grateful to the Director
of the Space Telescope Science Institute for awarding Director's
Discretionary time for this program. Support for program \#11359 was
provided by NASA through a grant HST-GO-11359.08-A from the Space
Telescope Science Institute, which is operated by the Association of
Universities for Research in Astronomy, Inc., under NASA contract NAS
5-26555. This research was (partially) supported by a grant from the
American Astronomical Society.
|
{
"timestamp": "2013-01-18T02:00:49",
"yymm": "1206",
"arxiv_id": "1206.6116",
"language": "en",
"url": "https://arxiv.org/abs/1206.6116"
}
|
\section{Introduction}
The large-scale halo of hot gas provides
a unique way to measure the baryonic and gravitational
mass of galaxy clusters.
The baryonic mass can be measured directly from
the observation of the hot X-ray emitting intra-cluster medium (ICM), and
of the associated stellar component \citep[e.g.][]{giodini2009,gonzales2007},
while measurements of the gravitational mass require
the assumption of hydrostatic equilibrium between the gas and
dark matter.
Cluster cores are subject to a variety of non-gravitational
heating and cooling processes that may result in deviations
from hydrostatic equilibrium, and in inner regions beyond the
core the ICM is expected to be in hydrostatic equilibrium
with the dark matter potential. At the
outskirts, the low-density ICM and the proximity to the
sources of accretion results in the onset
of new physical processes
such as departure from hydrostatic equilibrium \citep[e.g.,][]{lau2009},
clumping of the gas \citep{simionescu2011},
different temperature between electrons and ions \citep[e.g.,][]{akamatsu2011},
and flattening of the entropy profile \citep{sato2012}, leading
to possible sources of systematic uncertainties in the measurement of masses.
The detection of hot gas at large radii is limited
primarily by its intrinsic low surface brightness, uncertainties
associated with the subtraction of background (and foreground) emission,
and the ability to remove contamination from
compact sources unrelated to the cluster.
Thanks to its low detector background, \it Suzaku\rm\ reported the
measurement of ICM temperatures to $r_{200}$\ and beyond
for a few nearby clusters
\citep[e.g.][]{akamatsu2011,walker2012a,walker2012b,simionescu2011,burns2010,kawaharada2010,
bautz2009,george2009}; to date \emph{Abell~1835}\
has not been the target of a \it Suzaku\rm\ observation.
In this paper we report the \it Chandra\rm\ detection of X-ray emission
in \emph{Abell~1835}\ beyond $r_{200}$, using three observations
for a total of 193~ksec exposure time, extending the analysis
of these \it Chandra\rm\ data performed by \cite{sanders2010}.
The radius $r_{\Delta}$ is defined as the radius within which
the average mass density is $\Delta$ times the critical density of
the universe at the cluster's redshift for our choice of
cosmological parameters. The virial radius of a cluster
is defined as the equilibrium radius of the collapsed
halo, approximately equivalent to one half of its turnaround radius
\cite[e.g.][]{lacey1993, eke1998}.
For an $\Omega_{\Lambda}$-dominated universe,
the virial radius is approximately $r_{100}$ \citep[e.g.][]{eke1998}.
\emph{Abell~1835}\ is the most luminous cluster in the
\cite{dahle2006} sample of clusters at $z=0.15-0.3$ selected
from the \emph{Bright Cluster Survey}.
The combination of high luminosity and availability of
deep \it Chandra\rm\ observations with local background make \emph{Abell~1835}\
an ideal candidate to study its emission to the virial radius.
\emph{Abell~1835}\ has a redshift of $z=0.253$,
which for $H_{0}=70.2$~km~s$^{-1}$~Mpc$^{-1}$, $\Omega_{\Lambda}=0.73$,
$\Omega_M=0.27$ cosmology \citep{komatsu2011} corresponds
to an angular-size distance of $D_A=816.3$~Mpc, and a scale of
237.48 kpc per arcmin.
\section{Chandra and ROSAT observations of Abell~1835 and the detection
of cluster emission beyond $r_{200}$}
\label{sec:Sx}
\subsection{Chandra observations}
\it Chandra\rm\ observed \emph{Abell~1835}\ three times between December 2005 and August 2006
(observations ID 6880, 6881 and 7370), with a combined clean exposure time
of 193~ks. The three observations had a similar aimpoint towards the
center of the cluster (R.A. 14h01m02s, Dec. +02d51.5m J2000) and different
roll angles. All observations were taken with the ACIS-I detector configuration,
which consists of four ACIS front-illuminated chips in a two-by-two square,
plus a fifth identical chip that may be used to measure
the \emph{in situ} soft X-ray background.
Figure~\ref{fig:a1835} is an image from the longest observation (ID 6880, 118ks)
in the soft X-ray band (0.7-2 keV). In addition to a large number of
compact X-ray sources that were excluded from further analysis, the data
show a clear detection of diffuse X-ray emission associated with two
additional low-mass clusters identified from the \emph{Sloan Digital Sky Survey},
MAXBCG J210.31728+02.75364 and WHL J140031.8+025443.
The cluster MAXBCG J210.31728+02.75364 is the only cluster in the vicinity of \emph{Abell~1835}\
reported in the MAXBCG catalog of \cite{koester2007}, and it has
a measured photo-$z$
of 0.238, while the catalog of \cite{wen2009} reports a photo-$z$
of 0.269 for the same source; given the uncertainties associated
with photometric redshifts, it is likely that the cluster is
in physical association with \emph{Abell~1835}\ ($z=0.253$).
The \cite{wen2009} catalog also reports another optically-identified cluster
in the area, WHL J140031.8+025443, with a spectroscopic redshift of $z=0.2505$.
The association of these two groups with \emph{Abell~1835}\ is confirmed by redshift
data provided by C. Haines (personal communication), who measures
a redshift of $z=0.250$ for WHL J140031.8+025443, and $z=0.245$ for MAXBCG J210.31728+02.75364.
Since the goal of this paper is to study the
diffuse emission associated with \emph{Abell~1835}, we excise a region of radius 90~arcsec
around the position of the two clusters (black circles in
Figure~\ref{fig:a1835}), and study their emission separately from that of \emph{Abell~1835}\
(see Section~\ref{sec:low-mass-clusters}).
\begin{figure}
\centering
\includegraphics[width=3.1in,angle=-90]{a1835Chandra.ps}
\caption{Image of \emph{Abell~1835}\ from observation 6880, in the 0.7-2 keV band.
The data were smoothed with a Gaussian kernel of $\sim$6 arcsec standard error.
The dashed circles correspond to radial distances of approximately $r_{500}$\ and $r_{200}$,
and the full black circles mark the position of the two low-mass clusters associated with
\emph{Abell~1835}.}
\label{fig:a1835}
\end{figure}
\subsection{Chandra data analysis}
The reduction of the \it Chandra\rm\ observations follows the procedure
described in \cite{bonamente2006} and \cite{bonamente2011}, which consists of filtering the
observations for possible periods of flaring background,
and applying the latest calibration; no significant flares
were present in these observations. The reduction was
performed in CIAO~4.2, using CALDB 4.3; in Sec.~\ref{sec:robustness} we discuss the impact of calibration
changes on our results. One of the calibration issues
that can affect the measurement of cluster emission is the uncertainty in
the contamination of the optical blocking filter, which causes
a reduction in the low energy quantum efficiency of the \it Chandra\rm\ detectors.
The spatial and time dependence of this contaminant affects primarily
the effective area at $\leq$0.7 keV~\footnote{See \cite{marshall2004} and
\it Chandra\rm\ calibration memos at cxc.harvard.edu.}, with an estimated residual
error of $\leq$ 3\% at higher energy. We therefore limit our spatial and spectral
analysis to the $\geq$0.7~keV band.
The superior angular resolution of the \it Chandra\rm\ mirrors \citep{weisskopf2000}
results in a point-spread function with a 0.5~arcsec FWHM, and therefore
there is negligible contribution from the bright cluster core to
the emission in the outer annuli, and from secondary scatter (stray light) by sources outside
the field of view.
The subtraction of particle and sky background is one of the
most crucial aspects of the analysis of low surface brightness cluster regions.
We use \it Chandra\rm\ blank-sky background observations,
rescaled according to the high-
energy flux of the cluster, to ensure a correct subtraction
of the particle background that is dominant at $E\geq9.5$~keV,
where the Chandra detectors have no effective area.
The temporal and spatial variability of the soft X-ray background at $E<2$~keV
also requires that a peripheral region free of cluster emission
is used to measure any local enhancement (or deficit) of soft X-ray
emission relative to that of the blank-sky fields, and account for this
difference in the analysis. This method
is accurate for the determination of the temperature profile, but may result
in small errors in the measurement of the surface brightness profile.
In fact, the blank-sky background is a combination of a particle component
that is not vignetted, and a sky component that is vignetted. To determine the
surface brightness of the cluster and of the local soft X-ray background,
a more accurate procedure consists of subtracting the non-vignetted
particle component as measured from \it Chandra\rm\ observations in which
the ACIS detector was stowed \cite[e.g.,][]{hickox2007}, after rescaling the
stowed background to match the $E\geq9.5$~keV cluster count rate, as in
the case of the blank-sky background.
Point sources are identified and removed using a wavelet detection method
that correlates the cluster observation with wavelet functions of
different scale sizes (\emph{wavdetect} in CIAO). Subtraction
of point sources from the blank-sky observations was
performed by eye, with results that closely match those
of the wavelet method.
\begin{figure}
\centering
\includegraphics[width=2.5in, angle=-90]{Sx_stow_07-2.ps}
\caption{Exposure corrected surface brightness profile of \emph{Abell~1835}\ in the soft X-ray band (0.7-2 keV),
obtained by subtraction of the particle background from the ACIS stowed
observations. The radii $r_{500}$, $r_{200}$\ and the virial radius ($\sim r_{100}$) are
estimated from the data in Section~\ref{sec:r500} (see Table~\ref{tab:vikh-masses}).
The dashed red line is the average background level in the region $\geq$ 700 arcsec.}
\label{fig:Sx}
\end{figure}
\subsection{Measurement of the surface brightness profile with Chandra}
The surface brightness profile obtained using this background subtraction
is shown in Figure~\ref{fig:Sx}, in which the red line represents
the average value of the background at radii $\geq$~700 arcsec, where
the surface brightness profile is consistent with a constant level.
To determine the outer radius at which \it Chandra\rm\ has a significant detection
of the cluster, we also include sources of systematic errors in our analysis.
One source of uncertainty is the error in the measurement of the background level,
shown in Figure~\ref{fig:Sx-closeup} as the solid red lines.
The error is given by the standard deviation of the weighted mean of the datapoints
at radii greater than 700~arcsec,
to illustrate that
each bin in the surface brightness profile beyond this radius
is consistent with a constant level of the background.
Another source of uncertainty is the amount by which the stowed background is to
be rescaled to match the cluster count rate at high energy. The stowed background
dataset applicable to the dates of observation of \emph{Abell~1835}\ has an exposure
time of 367~ksec, and the relative error in the rescaling of the background to match
the cluster count rate at high energy is 0.7\%, as determined by the Poisson
error in the photon counts at high energy.
Moreover, \cite{hickox2006}
has shown that the spectral distribution of the particle background is remarkably stable,
even in the presence of changes in the overall flux, and that
the ratio of soft-to-hard (2-7 keV to 9.5-12 keV) count rates remains constant to within $\leq$2~\%. We therefore
apply a systematic error of 2~\% in the stowed background flux, to account for this
possible source of uncertainty, in addition to the 0.7\% error
due to the uncertainty in the rescaling of the background.
\begin{figure}
\centering
\includegraphics[width=2.5in, angle=-90]{Sx_stow_07-2_closeup.ps}
\caption{Close-up view of Figure~\ref{fig:Sx}, in which the red lines represent the
1-$\sigma$ confidence in the background level as determined from the $\geq$700~arcsec region,
and the green error bars combine the statistical and systematic errors in the determination
of the surface brightness.
}
\label{fig:Sx-closeup}
\end{figure}
In Figure~\ref{fig:Sx-closeup} the green error
bars represent the cumulative effect of the statistical error due to the counting
statistic, and the
sources of errors associated with the use of the stowed background; the systematic
errors were added linearly to the statistical error as a conservative measure.
This error analysis shows that the emission from \emph{Abell~1835}\ remains significantly
above the background beyond $r_{200}$\ and
until approximately a radius of 600 arcsec, or approximately 2.4~Mpc.
The significance of the detection in the region 450-600" (the
five datapoints in Figure~\ref{fig:Sx-closeup} after the $r_{200}$ marker)
is calculated as 5.5$\sigma$, and is obtained by using the larger systematic
error bars for the surface brightness profile (in green in Figure~\ref{fig:Sx-closeup}),
added in quadrature to the error in the determination of the background level
from the $\geq 700$" region (red lines in Figure~\ref{fig:Sx-closeup}).
To further test the effect of the background subtraction, we repeat our
background subtraction process using the $\geq 600$" region
(instead of the $\geq 700$" region). The background level
increases by less than 1$\sigma$ of the value previously determined (e.g., the
two levels are statistically indistinguishable), and the significance of detection in
the region 450-600" is 4.7$\sigma$. Therefore we conclude that it is unlikely that
the excess of emission beyond $r_{200}$\ and out to the virial radius is due to errors in the background
subtraction process.
A similar result can be obtained including the 2-7 keV band,
but the signal-to-noise is reduced because at large radii this band is
dominated by the background due to the softening of the cluster emission.
We estimate $r_{200}$\ and the virial radius ($\sim r_{100}$) from
the \it Chandra\rm\ data in Section~\ref{sec:r500}.
\subsection{Measurement of the
surface brightness profile with the ROSAT Position Sensitive Proportional Counter}
\it ROSAT\rm\ observed \emph{Abell~1835}\ on July 3--4 2003 for 6~ks with the Position
Sensitive Proportional Counter (PSPC); the observation ID was 800569.
The PSPC has a 99.9\% rejection of particle background in the 0.2-2~keV band
\citep{plucinsky1993} and an average angular resolution of $\sim$30~arcsec that makes it
very suitable for observations of low surface brightness objects such as
the outskirts of galaxy clusters \citep[e.g.][]{bonamente2001,bonamente2002,bonamente2003}.
We reduce the event file following the procedure described in \cite{snowden1994}
and \cite{bonamente2002}, which consists of corrections for detector gain fluctuations, and
removal of periods with a \emph{master veto} rate of $\geq$170 counts~s$^{-1}$ in order
to discard periods of high background. These filters result in a clean exposure time of
5.9~ks.
Since the PSPC background is given only by the photon background, we generate an
image in the 0.2-2 keV band and use the exposure map to correct for the position--dependent
variations in the detector response and mirror vignetting.
We masked out the two low-mass cluster regions as we did for the \it Chandra\rm\ data and
all visible point sources, and obtained an exposure-corrected surface brightness profile
out to a radial distance of $\sim$20 arcmin, which corresponds to the location of the
inner support structure of the PSPC detector. The \it ROSAT\rm\ surface brightness profile therefore
covers the entire azimuthal range.
In Figure~\ref{fig:rosat} we show the radial profile of the surface brightness in the 0.2-2 keV
band, showing a $\sim$2~$\sigma$ excess of emission in the 400-600" region using the background
level calculated from the region $\geq$700", as done for the \it Chandra\rm\ data.
The \it ROSAT\rm\ data therefore provide additional evidence of emission beyond $r_{500}$\ and out to the
virial radius, although the short \it ROSAT\rm\ exposure does not have sufficient number of counts
to provide a detection with the same significance as in the \it Chandra\rm\ data.
\begin{figure}
\includegraphics[width=2.4in,angle=-90]{./Sx-rosat.ps}
\caption{Surface brightness profile in 0.2-2 keV band from a 6~ks
observation with ROSAT PSPC. The background level is determined from
the data at radii $\geq$~700", as in the \it Chandra\rm\ data.}
\label{fig:rosat}
\centering
\end{figure}
\section{Analysis of the Chandra spectra}
\label{sec:kT}
\subsection{Measurement of the temperature profile of Abell~1835}
\label{sec:spectral-fits}
We measure the temperature profile of \emph{Abell~1835}\ following
the background subtraction method described in Sec.~\ref{sec:Sx},
which makes use of the blank-sky background dataset and a
measurement of
the local enhancement of the soft X-ray background, as
is commonly done for \it Chandra\rm\ data \citep[e.g.][]{vikhlinin2006, maughan2008, bulbul2010}.
In Figure~\ref{fig:soft-back} we show the spectral distribution
of the local soft X-ray background enhancement, as determined from
a region beyond the virial radius ($\geq$700~arcsec);
this emission was modelled with an APEC emission
model of $kT\sim 0.25$~keV and of Solar abundance, consistent with
Galactic emission, and then subtracted from all
spectra. The spectra were fit in the 0.7-7 keV band using the
minimum $\chi^2$ statistic, after binning to ensure that there are
at least 25 counts per bin. We use XSPEC version 12.6.0s
for the spectral analysis.
\begin{figure}
\centering
\includegraphics[width=2.3in,angle=-90]{backSpectrum.ps}
\caption{Spectrum of the local enhancement of the soft X-ray
background from observation 6880. The other two exposures have
similar levels of soft X-ray fluxes above the blank-sky emission,
which is modeled as an unabsorbed $\sim0.25$~keV thermal plasma at $z=0$.
The best-fit model has a $\chi^2_{min}=73.9$ for 78 degrees of freedom,
for a null hypothesis probability of 61\%.}
\label{fig:soft-back}
\end{figure}
\begin{figure*}
\centering
\includegraphics[width=2.5in,angle=-90]{330-450.ps}
\includegraphics[width=2.5in,angle=-90]{450-600.ps}
\caption{Blank-sky background subtracted spectra of
regions 330-450" and 450-600" from observation 6880.
The solid lines are the best-fit model of the local
soft X-ray enhancement of Figure~\ref{fig:soft-back} (red),
and its 90\% upper limit (green).}
\label{fig:spectra}
\end{figure*}
In Figure~\ref{fig:spectra} we show the spectra of
the outermost two regions, to show the impact of the
soft X-ray residuals in the background subtraction.
The importance of background systematics in the detection
of emission and measurement of cluster temperatures for
regions of low surface brightness was
recently addressed by \cite{leccardi2008} using \it XMM-Newton\rm\ data.
For our \it Chandra\rm\ observations, the two main sources of uncertainty when determining the temperature
of the outer regions are the subtraction of the blank-sky background, and
the subtraction of the locally-determined soft X-ray background.
Table~\ref{tab:background} reports the statistics of the background relevant
to the outer regions of the cluster, with both regions $\sim$~10-20\% above
the blank-sky background, determined with a precision of 1-2\%.
The additional soft X-ray background accounts for a significant portion
of the remaining signal, as shown in Figure~\ref{fig:spectra}; the 90\%
upper limit to the measurement of this background is shown as the
green lines, and emission from the cluster is still detected with
high statistical significance. Both sources of error are
included in the temperature measurements at large radii.
\begin{table*}
\centering
\caption{Background levels in outer regions}
\label{tab:background}
\begin{tabular}{lccc}
\hline
& \multicolumn{3}{c}{Observation ID}\\
& 6880 & 6881 &7370\\
\hline
Exposure time (ks) & 114.1 & 36.0 & 39.5 \\
Correction to Blank-sky Subtraction$^{a}$ & -0.04$\pm$0.01 & -0.125$\pm$0.015 & -0.04$\pm$0.015 \\
\hline
\multicolumn{4}{c}{Region 330-450"}\\
Total Counts & 18,124 & 4,938 & 5,686 \\
Count rate (c s$^{-1}$) & 0.158$\pm$0.001 & 0.137$\pm$0.002 & 0.144$\pm$0.002\\
Net count rate$^{b}$ ($10^{-2}$ c s$^{-1}$) & $2.75\pm0.10$ & $2.54\pm0.20$ & $2.36\pm0.20$ \\
Percent above back. & 17.4$\pm$0.6 & 18.2$\pm$1.4 & 16.4$\pm$1.4 \\
SXB count rate ($10^{-3}$ c s$^{-1}$) & 3.34$\pm$0.77 & 7.20$\pm$0.94 & 7.10$\pm$0.71 \\
\hline
\multicolumn{4}{c}{Region 450-600"} \\
Total Counts & 15,811 & 4,901 & 5,483 \\
Count rate (c s$^{-1}$) & 0.139$\pm$0.001 & 0.136$\pm$0.002 & 0.139$\pm$0.002 \\
Net count rate$^{b}$ ($10^{-2}$ c s$^{-1}$) &1.02$\pm$0.10 & 1.58$\pm$0.20 & 1.23$\pm$0.20 \\
Percent above back. & 7.3$\pm$0.7 & 11.6$\pm$1.5 & 8.8$\pm$1.4 \\
SXB count rate ($10^{-3}$ c s$^{-1}$) & 3.06$\pm$0.70 & 7.50$\pm$0.98 & 7.32$\pm$0.73\\
\hline
\end{tabular}
\flushleft
$a$: This is the fractional correction of the blank-sky data, to match the high-energy flux
in the cluster observation. \\
$b$: This is the background-subtracted count rate, including cluster and soft X-ray background
(SXB) signal.
\end{table*}
We use the APEC code \citep[][code version 1.3.1]{smith2001} to model the \it Chandra\rm\ spectra, with a
fixed Galactic HI column density of $N_H=2.04\times10^{20}$~cm$^{-2}$ \citep{kalberla2005}.
The regions at radii $\leq 330$" have a variable metal abundance, while the outer
regions have a fixed abundance of $A=0.3$. In addition to the statistical errors
obtained from the XSPEC fits, we add a systematic error of 10\% in the
temperature measured in the core and a 5\% error to the other region,
to account for possible systematic uncertainties due to the \it Chandra\rm\
calibration \citep[see, e.g.,][]{bulbul2010}.
One possible source of systematic uncertainty in our results is indicated
by the systematic difference between the \it Chandra\rm/ACIS and \it XMM-Newton\rm/EPIC
temperature measurements of galaxy clusters \citep{nevalainen2010}.
This amounts to a $\pm$10\% bias in the calibration of the effective area at 0.5 keV,
which decreases roughly linearly towards 0\% bias at 2 keV.
Assuming that \it XMM-Newton\rm/pn has a more accurately calibrated effective area,
we reduced the \it Chandra\rm\ effective area by multiplying it with a linear function
as indicated by the \it Chandra\rm/\it XMM-Newton\rm\ comparison. As a result, the temperature at the
outermost radial bin decreases by $\sim$ 5\%. Thus, the cross-calibration
uncertainties between \it Chandra\rm\ and \it XMM-Newton\rm\ do not explain the low temperature we measure in the outermost radial bin.
Uncertainties in the Galactic column density of HI do not impact significantly our results.
Changing the value of $N_H$ by $\pm$10\%, consistent with the variations between the \cite{kalberla2005}
and the \cite{dickey1990} measurements, results in a change of best-fit temperature
in each bin by less than 2\%.
Given the emphasis of this paper on the detection of
emission at large radii, we investigate
the sources of uncertainty caused by the background subtraction
in the outer region at $\geq$330". We report the results of this
error analysis in Table~\ref{tab:kT-err}, where \emph{cornorm} refers
to the normalization of the blank-sky background, and \emph{soft residuals}
refers to the normalization of the soft X-ray residual model, as
reported in Table~\ref{tab:background}. In the analysis that follows,
we add the systematic errors caused by these sources linearly to the
statistical error.
Our data do not constrain well the metal abundance of the plasma
in the outer regions.
Using an abundance of $A=0.5$ instead of the nominal $A=0.3$
leads to negligible changes in the best-fit temperature for both of
the outer annuli.
In the extreme case of an $A=0.0$ metal abundance,
both regions have an acceptable fit, with the best-fit temperatures
changing respectively by $+6$\% for the 330-450" region ($\Delta \chi^2=+1.3$), and
by $-22$\% for the 450-600" region ($\Delta \chi^2=+9.2$,
best fit decreases from 1.26 to 0.98 keV).
We therefore find that, in the case of exceptionally low metallicity, the
temperature profile we measure from these \it Chandra\rm\ data would be even significantly steeper
than indicated by the result in Table~\ref{tab:kT-err}.
Given that these data do not provide direct indication that the plasma in the outer regions
may have null metal content, we do not fold in this source of systematic error in the
analysis that follows.
\begin{table}
\caption{Temperature measurement and error analysis from
the \it Chandra\rm\ data.}
\centering
\label{tab:kT-err}
\begin{tabular}{lcc}
\hline
Region & \multicolumn{2}{c}{Projected Temperature (keV)}\\
\hline
& Measurement$^{a}$ & Calibration error$^{b}$ \\
0-10" & 4.78$\pm$0.06 & $\pm$0.48 \\
10-20" & 7.09$\pm$0.14 & $\pm$0.71 \\
20-30" & 8.72$\pm$0.27 & $\pm$0.87 \\
30-60" & 9.47$\pm$0.21 & $\pm$0.47 \\
60-90" & 10.57$\pm$0.33 & $\pm$0.53 \\
90-120" & 9.97$\pm$0.44 & $\pm$0.50 \\
120-180"& 9.68$\pm$0.49 & $\pm$0.48 \\
180-240" & 7.85$\pm$0.65 &$\pm$0.39 \\
240-330" & 6.02$\pm$0.65 &$\pm$0.30 \\
330-450" & 3.75$\pm$0.72 & $\pm$0.19\\
450-600" & 1.26$\pm$0.16 & $\pm$0.06 \\
\hline
\multicolumn{3}{c}{Measurement of Temperature Using} \\
\multicolumn{3}{c}{Background Systematic Errors (keV)}\\
& $+1\sigma$ cornorm$^{c}$ & $-1\sigma$ cornorm \\
330-450" & 3.02$\pm$0.54 & 4.67$\pm$1.00 \\
450-600" & 1.09$\pm$0.10 & 1.31$\pm$0.18 \\
& $+1\sigma$ soft res.$^{d}$ & $-1\sigma$ soft res.\\
330-450" & 4.53$\pm$1.03 & 3.05$\pm$0.54\\
450-600" & 1.37$\pm$0.25 & 1.16$\pm$0.12\\
\multicolumn{3}{c}{Summary of Background Systematic Errors$^{e}$} \\
330-450" & \multicolumn{2}{c}{$\pm 0.83\pm 0.74$ keV} \\
450-600" & \multicolumn{2}{c}{$\pm 0.11\pm 0.10$ keV} \\
\hline
\end{tabular}
\flushleft
$a$: Uncertainty is 1$\sigma$ statistical error from counting statistics only.\\
$b$: Includes \it XMM-Newton\rm/\it Chandra\rm\ cross-calibration uncertainty of the effective area \citep{nevalainen2010}.\\
$c$: This is temperature obtained by varying by $\pm 1\sigma$ the fractional
correction of the blank-sky data, to match the high-energy flux in the cluster observations.\\
$d$: This is the temperature obtained by varying by $\pm 1\sigma$ the normalization
of the best-fit model to the soft X-ray background residuals.\\
$e$: Obtained from the average deviation of the $\pm 1\sigma $ `cornorm' and `soft res.'
measurements from the measurement with nominal values of these parameters.
\end{table}
\cite{sanders2010} measured temperature profiles for \emph{Abell~1835}\ out
to approximately $r_{500}$\ with \it Chandra\rm\ and \it XMM-Newton\rm. Using the same \it Chandra\rm\
observations we analyze in this paper, their temperature profile
has a similar drop from the peak value to their outermost
annulus ($322\pm42$"), where they measure a temperature of kT=$4.67\pm^{0.82}_{0.52}$~keV
that is consistent with our measurements. Likewise, from the \it XMM-Newton\rm\ data their outermost
radial bin ($300\pm10$") has a temperature of kT=$5.2\pm^{1.2}_{0.7}$~keV, also
in agreement with our results.
The only measurement of the \emph{Abell~1835}\ temperature to the virial radius
available in the literature is that of \cite{snowden2008}, who
does report a temperature profile out to a distance of 12~arcmin
from a long \it XMM-Newton\rm\ observation (and out to 7' from a shorter observation).
In particular,
they report a temperature of $kT=3.14\pm0.93$~keV for the region
420-540", which straddles our measurements at 330-450" ($3.75\pm0.72$~keV)
and at 450-600" ($1.26\pm0.16$, statistical errors only). The same paper
also reports a measurement of $kT=3.33\pm1.75$~keV for the region 540-720",
i.e., beyond our outer annulus. Their temperature is somewhat higher than
ours, although the large error bars cannot exclude that the \it Chandra\rm\
and \it XMM-Newton\rm\ measurements
are consistent. Therefore our results confirm and extend the
earlier \it XMM-Newton\rm\ analysis of \cite{snowden2008}.
\subsection{Measurement of the average temperature of
MAXBCG J210.31728+02.75364
and WHL J140031.8+025443}
\label{sec:low-mass-clusters}
We also measure the temperature of the two SDSS clusters detected
in our \it Chandra\rm\ images,
MAXBCG J210.31728+02.75364
and WHL J140031.8+025443. The two clusters are located between a distance of $\sim$380-650"
from the cluster center, and therefore we start by extracting a spectrum for this annulus
excluding two regions of 1.5' radius centered at the two clusters.
This radius was determined by visual inspection, after smoothing of the \it Chandra\rm\ image with a
Gaussian kernel of $\sigma=6$ arcsec.
For this annulus, we measure a temperature of $kT=1.85\pm0.36$~keV for a fixed
abundance of $A=0.3$ Solar. We then use this spectrum as the local background for the
two cluster regions, and measure a temperature $kT=2.73\pm^{0.93}_{0.54}$~keV
for MAXBCG J210.31728+02.75364
(357 source photons, 19\% above the average emission of the annulus),
and $kT=2.09\pm^{4.6}_{0.55}$~keV for WHL J140031.8+025443 (538 photons, 27\% above background).
For both clusters, we assumed the same Galactic $HI$ column density as for \emph{Abell~1835}, and
a fixed metal abundance of $A=0.3$ Solar. For both clusters we also extract spectra in
regions larger than 1.5', and determine that no additional source photons are present
from these two clusters beyond this radius.
\subsection{Tests of robustness of the temperature measurement at large radii}
\label{sec:robustness}
To further test the measurement of temperatures especially at large radii,
where
the background subtraction is especially important,
we also measure the temperature profile using the same stowed
background data that was used for the surface brightness measurement
of Figures~\ref{fig:Sx} and \ref{fig:Sx-closeup}. As in the case
of the blank-sky background, we first rescale the stowed data to match
the high-energy count rate of the cluster observation, and
use a region at large radii ($\geq$ 700 arcsec) to measure
the local X-ray background. We model the background using
an APEC plus a power-law model, the latter component necessary
to model the harder emission due to unresolved AGNs that is typically removed when
the blank-sky background is used instead, and apply this
model to all cluster regions. We find that the temperature profile
is consistent within the 1~$\sigma$ statistical errors
of the values provided in Table~\ref{tab:kT-err} for each region,
and therefore conclude that the temperature drop at large radii, and
especially in the outermost region, is not sensitive
to the background subtraction method.
The temperature measurement is also dependent on an accurate subtraction
of background (and foreground) sources of emission.
Point sources in the field of view are detected using the CIAO
tool \emph{wavdetect}, which correlates the image with wavelets
at small angular scales (2 and 4 pixels, one pixel is 1.96"), searches the results for 3-$\sigma$ correlations, and
returns a list of elliptical regions to be excluded from the analysis.
We study in particular the
effect of background sources on the measurement of the temperature in the outermost
annulus (450-600"). In this region, \emph{wavdetect} finds
24 point sources, plus portions of the two low-mass galaxy clusters
described in Section~\ref{sec:low-mass-clusters}.
We extract a spectrum for this region from the longest observation (ID 6880),
and now include in the spectrum all
point sources excluded in the previous analysis.
We find a count rate of $3.20\pm0.13\times 10^{-2}$ counts~s$^{-1}$, compared
to the point source-subtracted rate of $1.02\pm0.10\times 10^{-2}$ counts~s$^{-1}$,
corresponding to an increase in background-subtracted flux by a factor of three.
We then fit the spectrum with the same APEC model as described in Section~\ref{sec:spectral-fits},
and find a best-fit temperature of $kT=1.96\pm0.17$~keV for a best-fit goodness statistic
of $\chi^2=537$ for 429 degrees of freedom (or $\chi^2_{red}$=1.25), compared
to the temperature of $1.22\pm0.19$~keV for a $\chi^2=415$ for 389 degrees of freedom (or $\chi^2_{red}$=1.08).
We therefore conclude that an accurate subtraction of point sources and unrelated
sources of diffuse emission is crucial to obtain an accurate measurement of the
temperature profile, especially in regions of low-surface brightness such as those
near the virial radius.
Changes in the instrument calibration affect the measurement of temperatures.
We therefore repeat the same data reduction and spectral analysis using the latest
software and calibration database available at time of writing (CIAO 4.4 and CALDB 4.5.1) for
the longest observation (ID 6880),
and obtain a new temperature profile for the same regions as reported in Table~\ref{tab:kT-err}.
In the outermost two regions, we measure a temperature of 3.04$\pm0.69$~keV (330-450") and
1.23$\pm$0.21~keV (450-600"), well within the 1-$\sigma$ confidence intervals of the measurements
using the older calibration (3.40$\pm$0.76 and 1.22$\pm$0.19 respectively, also in agreement with the
values of Table ~\ref{tab:kT-err} obtained from the combination of all exposures).
The temperature of the inner regions are also always within 1-$\sigma$
of the results obtained with the earlier calibration, and we therefore conclude that changes in the instrument
calibration do not affect significantly our results.
\section{Measurement of masses and gas mass fraction}
We fit the surface brightness and the temperature profiles with
the \cite{vikhlinin2006} model. The electron density is modelled
with a double-$\beta$ profile modified by a cuspy core component and an exponential cutoff at large radii,
for a total of eleven model parameters;
the temperature has both a cool-core component to follow
the cooler gas in the core, and a decreasing profile at large radii, for an additional
nine parameters. For our analysis, we follow \cite{vikhlinin2006} and fix the
$\gamma$=3.0 parameter, and do not use the cuspy-core component ($\alpha=0$)
or the second $\beta$-model component, so that the density is modelled by just
one $\beta$-model with an exponential cutoff, for just four free parameters
(core radius $r_c$, exponent $\beta$, scale radius $r_s$ and exponential cutoff
exponent $\epsilon$, see Table~\ref{tab:vikh-fit}).
For the temperature profile, we fix the parameter $a=0$, and the
remaining eight parameters are reported in Table~\ref{tab:vikh-fit}.
We use a Monte Carlo Markov chain (MCMC) method that we used in
previous papers \citep[e.g.,][]{bonamente2004,bonamente2006}.
The MCMC analysis consists of a projection of the three-dimensional
models and a comparison
of the projected surface brightness and temperature profiles,
and results in simultaneous estimation
of the posterior distributions of all model parameters. Uncertainties in the parameters are obtained
from the posterior distributions, with 1-$\sigma$ errors assigned using the 68.3\%
confidence interval around the median of the distribution.
The gas mass is directly calculated from the electron density
model parameters via
\begin{equation}
M_{gas}(r) = m_p \mu_e \int_0^r n_e(r) 4 \pi r^2 dr
\end{equation}
and the total gravitational mass via the equation of
hydrostatic equilibrium,
\begin{equation}
M(r) = - \frac{kT(r) r}{\mu_e m_p G} \left(\frac{d \ln n_e}{d \ln r} + \frac{d \ln kT}{d \ln r} \right),
\label{eq:hse}
\end{equation}
where $m_p$ is the proton mass, $\mu_e \simeq 1.17$ the mean electron molecular weight, and
$G$ the gravitational constant. The total density of matter is simply obtained via
\begin{equation*}
\rho(r) = \frac{1}{4 \pi r^2} \frac{d M(r)}{dr}
\end{equation*}
and therefore can be obtained via a derivative of the mass profile.
In Equation~\ref{eq:hse}, the term $A = d \ln n_e/d \ln r + d \ln kT/d \ln r$
and its first derivative are always negative, as is $d kT(r)/d r$ at large radii.
Therefore, the density can be rewritten as
\begin{equation}
\rho(r) = - \frac{1}{4 \pi r^2 \mu_e m_p G} \left[ kT \left( A +r \frac{dA}{dr} \right) + r A \frac{d kT}{dr} \right]
\label{eq:density}
\end{equation}
in which the only negative term is the one containing $A \cdot d kT(r)/d r$, while
the other two terms remain positive out to large radii.
\subsection{Modelling of the Chandra data out to the virial radius}
\label{sec:hse}
The \cite{vikhlinin2006} model provides a satisfactory fit
out to the outermost radius of 600";
Figure~\ref{fig:kt-0-600-fit} shows the best-fit models to the temperature
and surface brightness profiles,
best-fit parameters
of the model are reported in Table~\ref{tab:vikh-fit}.
The temperature profile measured by \it Chandra\rm\ in Figure~\ref{fig:kt-0-600-fit}
is so steep that it causes the total matter density $\rho(r)$ to become \emph{negative}
at approximately 400", indicating that the temperature profile
cannot originate from gas in hydrostatic equilibrium.
The situation is illustrated in Figure~\ref{fig:mass-0-600}, where the relevant terms
of Equation~\ref{eq:density} are plotted individually;
the density inferred from hydrostatic equilibrium becomes negative
where the
negative term crosses the positive ones,
and the mass profile has a negative slope beyond that point. These fit parameters
therefore lead to an unacceptable situation, and responsibility for this inconsistency
can be attributed to an overly steep temperature profile, with a drop by a factor of
ten between approximately 1.5' to 10'.
\begin{figure*}
\centering
\includegraphics[width=2.25in, angle=-90]{vikh_temp_0-600chain_90CI.ps}
\includegraphics[width=2.25in, angle=-90]{SB_vikh_0-600chain_0-600_600-1100.ps}
\caption{Left: Best-fit Vikhlinin model for the projected temperature
profile out to 600", with 90\% confidence intervals. Right: Best-fit
Vikhlinin model to the 0.7-2 keV surface brightness (model+background) profile.
Emission beyond 600" is statistically
consistent with the background, in blue is the extrapolation out to 1100".
Prior removal of the stowed background
caused the lower background level in Figure~\ref{fig:Sx}.
}
\label{fig:kt-0-600-fit}
\end{figure*}
\begin{table*}
\centering
\caption{Best-fit parameters for the Vikhlinin model using \it Chandra\rm\ data out to 330" and out to 600"}
\label{tab:vikh-fit}
\begin{tabular}{cccccccccc}
\hline
$n_{e0}$ & $r_{c}$ & $\beta$ & $r_{s}$ & $\epsilon$ & $n_{e02}$ & $\gamma$ & $\alpha$ & $\chi^{2}_{tot} \textrm{(d.o.f.)}$\\
(10$^{-2} $cm$^{-3}$) & (arcsec) & & (arcsec) & & & & & &\\
\hline
\multicolumn{10}{c}{Using \it Chandra\rm\ data out to 330"}\\
$9.602\pm^{0.488}_{0.415}$ & $6.743\pm^{0.373}_{0.403}$ & $0.498\pm^{0.009}_{0.009}$ & $119.8\pm^{13.3}_{13.4}$ & $1.226\pm^{0.098}_{0.097}$
& 0.0 & 3.0 & 0.0 & \nodata\\
\hline
\multicolumn{10}{c}{Using \it Chandra\rm\ data out to 600"}\\
$9.763\pm^{0.447}_{0.450}$ & $6.346\pm^{0.385}_{0.343}$ & $0.488\pm^{0.009}_{0.009}$ & $96.44\pm^{9.55}_{8.67}$ & $1.067\pm^{0.075}_{0.079}$
& 0.0 & 3.0 & 0.0 & \nodata\\
\hline
\hline
$T_{0}$ & $T_{min}$ & $r_{cool}$ & $a_{cool}$ & $r_{t}$ & $a_{t}$ & $b_{t}$ & $c_{t}$ &\\
(keV) & (keV) & (arcsec) & & (arcsec) & & & & &\\
\hline
\multicolumn{10}{c}{Using \it Chandra\rm\ data out to 330"}\\
$38.25\pm^{19.63}_{17.23}$ & 3.0 & $92.48\pm^{52.63}_{40.52}$ & 1.0 & $257.5\pm^{143.0}_{66.72}$ & 0.0 & $1.024\pm^{0.426}_{0.283}$ & 2.0 & 39.0 (83)\\
\hline
\multicolumn{10}{c}{Using \it Chandra\rm\ data out to 600"}\\
$10.17\pm^{0.85}_{0.60}$ & 3.0 & $11.82\pm^{3.61}_{2.29}$ & $1.924\pm^{0.802}_{0.568}$ & 600.0 & 0.0 & $2.800\pm^{0.224}_{0.210}$ & 10.0 & 106.4 (154)\\
\hline
\end{tabular}
\end{table*}
\begin{table*}
\centering
\caption{Masses Calculated using \it Chandra\rm\ data out to 330", and Extrapolated out to $r_{100}$}
\label{tab:vikh-masses}
\begin{tabular}{ccccc}
\hline
$\Delta$ & $r_{\Delta}$ & $M_{gas}$ & $M_{total}$ & $f_{gas}$\\
& (arcsec) & $\times 10^{13}~M_{\odot}$ & $\times 10^{14}~M_{\odot}$ & \\
\hline
2500 & $164.9\pm^{4.1}_{3.9}$ & $4.70\pm^{0.15}_{0.14}$ & $5.03\pm^{0.38}_{0.35}$ & $0.093\pm^{0.004}_{0.004}$\\
500 & $326.6\pm^{7.1}_{6.9}$ & $10.75\pm^{0.23}_{0.23}$ & $7.80\pm^{0.52}_{0.49}$ & $0.138\pm^{0.006}_{0.006}$\\
200 & $453.3\pm^{15.2}_{15.1}$ & $15.36\pm^{0.48}_{0.48}$ & $8.35\pm^{0.86}_{0.81}$ & $0.184\pm^{0.014}_{0.012}$\\
100 & $570.9\pm^{26.6}_{25.3}$ & $19.53\pm^{0.84}_{0.82}$ & $8.34\pm^{1.22}_{1.06}$ & $0.234\pm^{0.024}_{0.022}$\\
\hline
\hline
\end{tabular}
\end{table*}
\begin{figure*}
\centering
\includegraphics[width=2.3in, angle=-90]{mass-0-600.ps}
\includegraphics[width=2.3in, angle=-90]{terms-0-600.ps}
\caption{Mass profile using data out to 600" and the temperature fit of Figure~\ref{fig:kt-0-600-fit},
and the radial distribution of the positive and negative terms in the density
equation (Equation~\ref{eq:density}).}
\label{fig:mass-0-600}
\end{figure*}
The results presented in this section provide
evidence that the gas detected by \it Chandra\rm\ near the virial radius is
\emph{not} in hydrostatic equilibrium, and a number of theoretical
studies do in fact suggest that beyond $r_{500}$\ the intergalactic
plasma is not supported solely by thermal pressure \citep[e.g.][]{lau2009}.
\it Suzaku\rm\ has reported the measurement of emission near the
virial radius for several clusters, including \emph{Abell~1413},
\emph{Hydra~A}, \emph{Perseus}, \emph{PKS0745-191}, \emph{Abell~1795}, \emph{Abell~1689}
and \emph{Abell~2029}
\citep{hoshino2010,sato2012,simionescu2011,george2009, bautz2009,kawaharada2010,
walker2012a,walker2012b}. Some of these results do in fact report an apparent
decrease in total mass with radius \citep[e.g.][]{george2009,kawaharada2010}
and lack of hydrostatic equilibrium at large radii \citep[e.g.][]{bautz2009}, similar
to the results presented in this paper.
Temperature profiles measured by \it Suzaku\rm\ typically do not feature as extreme a temperature drop
as the one reported in Figure~\ref{fig:kt-0-600-fit}, i.e., a factor of nearly 10
from peak to outer radius, although in some cases the drop of temperature from the peak
value to that at $r_{200}$\ is consistent with the one reported in this paper.
\subsection{Modelling of the Chandra data out to $r_{500}$}
\label{sec:r500}
The steepening of the radial profile beyond 400" is driven by the
temperature of the last datapoint beyond $r_{200}$.
We also model the surface brightness and temperature profiles of the
\it Chandra\rm\ data out to only 330", or approximately $r_{500}$, and find the
best-fit \cite{vikhlinin2006} model for the temperature profile
reported in Figure~\ref{fig:kT-0-330} and Table~\ref{tab:vikh-fit}.
We measure a gas mass fraction of $f_{gas}(r_{500})=0.138\pm0.006$;
if we add the mean stellar fraction as measured by either \cite{giodini2009}
($f_{\star}=0.019\pm0.002$) or by \cite{gonzales2007} ($f_{\star}\simeq0.012$)
assuming $M(r_{500})=7.1\times 10^{14}$ $M_{\odot}$,
we find that \emph{Abell~1835}\ has an average baryon content within $r_{500}$\
that is consistent with the cosmic abundance of $\Omega_b/\Omega_M=0.167\pm0.007$ \citep{komatsu2011}
at the 2-$\sigma$ level. As is the case in most clusters, especially relaxed
ones, the radial distribution of the gas mass fraction increases with radius
\citep[e.g.,][]{vikhlinin2006}.
We use this modelling of the data to measure $r_{500}$, and to provide estimates
for $r_{200}$\ and the virial radius.
The extrapolation of this model to 600" now falls above the measured temperature profile,
and the mass profile using hydrostatic equilibrium is monotonic.
This best-fit model is marginally compatible with the assumption of hydrostatic equilibrium.
In fact, Table~\ref{tab:vikh-masses} shows that the extrapolated mass profile
flattens around $r_{200}$, with virtually no additional mass being necessary
beyond this radius to sustain the hot gas in hydrostatic equilibrium.
Moreover, between $r_{500}$\ and $r_{200}$, all of the gravitational mass is accounted for
by the hot gas mass, i.e., \emph{no} dark matter is required beyond $r_{500}$.
This extrapolation of the $\leq$~$r_{500}$\ data to the virial radius therefore
leads to a dark matter halo that is much more concentrated than
the hot gas.
\section{Entropy profile and convective instability at
large radii}
\label{sec:entropy}
The Schwarzschild
criterion for the onset of convective instability is given by the
condition of buoyancy of an infinitesimal blob of gas that is displaced by an
amount $dr$, $d \rho_{blob} < d \rho$,
where $\rho_{blob}$ is the density of the displaced blob, assumed to attain pressure
equilibrium with the surrounding, and $\rho$ is the density of ambient medium.
If the blob is displaced adiabatically, using pressure $P$ and entropy $s$ as the
independent thermodynamic variables in the derivatives of $\rho_{blob}$ and $\rho$,
the buoyancy condition gives
\begin{equation}
\left. \frac{\partial \rho}{\partial s} \right|_{P} ds > 0
\label{eq:buoyancy}
\end{equation}
as condition for convective instability, i.e., a blob that is displaced radially outward will
find itself in a medium of higher density and continue to rise to larger radii. Since
$({\partial \rho}/{\partial s})_P=-\rho^2 (\partial T / \partial P)_s<0$ (material
is heated upon adiabatic compression), Equation~\ref{eq:buoyancy} simply
reads that \emph{a radially decreasing entropy profile is convectively unstable}.
An ideal gas has an entropy of
\begin{equation}
S = \nu R \left( \frac{3}{2} \ln T - \ln \rho + C\right)
\end{equation}
where $\nu$ is the number of moles, $R$ is the gas constant, and $C$ is a constant.
In astrophysical applications, it is customary \citep[e.g.][]{cavagnolo2009} to use a definition
of entropy that is related to the thermodynamic entropy by an operation of
exponential and a constant offset,
\begin{equation}
S = \frac{kT}{n_e^{2/3}}.
\label{eq:entropy}
\end{equation}
The entropy $S$ defined by Equation~\ref{eq:entropy} has
units
of keV cm$^{2}$, and it is required to be radially increasing to maintain convective equilibrium.
Numerical simulations
indicate that entropy outside the core is predicted to increase with radius approximately
as $r^{1.1}$ or $r^{1.2}$ \citep{voit2005,tozzi2001}.
In Figure~\ref{fig:entropy-profile} we show the radial profile of the entropy out to
the outer radius of 10 arcmin, with a significant decrease at large radii that indicates
an incompatibility of the best-fit model with convective equilibrium. For comparison,
we also show the entropy profile measured using the modelling of the data
out to only $r_{500}$, as described in Sec.~\ref{sec:r500}. This entropy profile
uses the shallower temperature profile of Figure~\ref{fig:kT-0-330}, and its
extrapolation to larger radii remains non-decreasing, i.e., marginally consistent
with convective equilibrium.
The Schwarzschild criterion
does not apply in the presence of a magnetic field. For typical
values of the thermodynamic quantities of the ICM, the electron and ion gyroradii are
several orders of magnitude smaller than the mean free path for Coulomb collisions
\citep[e.g.][]{sarazin1988}, even for a magnetic field of order 1 $\mu G$, and therefore
diffusion takes place primarily along field lines \citep[e.g.][]{chandran2007}.
There is strong evidence of magnetic
fields in the central regions of clusters \citep[e.g., radio halos, ][]{venturi2008,cassano2006},
though it is not clear whether magnetic fields are ubiquitous
near the virial radius, as in the case of Abell~3376 \citep{bagchi2006}.
In the presence of magnetic fields, \cite{chandran2007} has shown that the
condition for convective instability is simply $dT/dR<0$.
The \it Chandra\rm\ data out to the virial radius therefore indicate
that the ICM is convectively unstable, regardless of the
presence of a magnetic field. In fact, in the absence of magnetic
fields near the virial radius, Figure~\ref{fig:entropy-profile} shows that \emph{Abell~1835}\
fails the standard Schwarzschild criterion, i.e., the entropy decreases with radius;
in the presence of magnetic fields, the negative gradient in the temperature profile alone
is sufficient for the onset of convective instability
\citep[e.g., as discussed by ][]{chandran2007}.
Convective instabilities would carry hotter
gas from the inner regions towards the outer region within a few sound crossing
times. As shown by \cite{sarazin1988}, the sound crossing time for a 10~keV
gas is $\sim 0.7$~Gyr for a 1~Mpc distance,
and an unstable temperature gradient such as that of Figure~\ref{fig:kt-0-600-fit}
would be flattened by convection within a few Gyrs.
Convection could in principle also result in an additional pressure gradient
due to the flow of hot plasma to large radii, which can in turn help support the gas
against gravitational forces.
\begin{figure*}
\centering
\includegraphics[width=2.3in, angle=-90]{vikh_temp_0-330chain_extrapolated_to_600_90CI.ps}
\includegraphics[width=2.3in, angle=-90]{fgas_profile.ps}
\caption{
Temperature and gas mass fraction profiles measured from a fit to the \it Chandra\rm\ data out to 330", and extrapolation of the
best-fit model out to 600".}
\label{fig:kT-0-330}
\end{figure*}
\begin{figure*}
\centering
\includegraphics[width=2.3in, angle=-90]{entropy_0-600.ps}
\includegraphics[width=2.3in, angle=-90]{entropy_0-330.ps}
\caption{
Deprojected entropy profiles using the full \it Chandra\rm\ data out to 600" (left, see Section~\ref{sec:hse}),
and using only data out to $r_{500}$\ (right, see Section~\ref{sec:r500}).}
\label{fig:entropy-profile}
\end{figure*}
\section{Discussion and interpretation}
In this paper we have reported the detection of X-ray emission in \emph{Abell~1835}\ with \it Chandra\rm\ that extends out to approximately
the cluster's virial radius. The emission can be explained by the presence of a cooler
phase of the plasma that is dominant at large radii, possibly linked to the infall
of gas from large-scale filamentary structures. We also investigate the effects of clumping of the gas
at large radii, and conclude that in principle a radial gradient in the clumping factor of the hot ICM
can explain the apparent flattening of the entropy profile and the turn-over of the mass profile.
\subsection{Detection of X-ray emission out to the virial radius}
The detection of X-ray emission out to a radial distance of 10 arcmin, or approximately 2.4~Mpc,
indicates the presence of diffuse gas out to the cluster's virial radius.
This is the first detection of gas out to the virial radius with \it Chandra\rm, matching
other detections obtained with \it Suzaku\rm\ for nearby clusters
\citep[e.g.][]{akamatsu2011,walker2012a,walker2012b,simionescu2011,burns2010,kawaharada2010,
bautz2009,george2009}.
Despite its higher background, \it Chandra\rm\ provides a superior angular resolution to image and remove emission from unrelated sources.
As can be seen from Figure~\ref{fig:a1835}, there are approximately 100 point-like sources that were automatically
detected and removed, and we were also able to identify two low-mass clusters that are likely associated with \emph{Abell~1835}.
\it Chandra\rm\ therefore has the ability to constrain the emission of clusters to the virial radius, especially for higher-redshift
cool-core clusters for which the \it Suzaku\rm\ point-spread function would cause significant contamination from the
central signal to large radii.
It is not easy to interpret the emission at the outskirts as an extension
of the hot gas detected at radii $\leq$~$r_{500}$. In fact, as shown in Section~\ref{sec:hse}, the steepening of the
temperature profile is incompatible with the assumption of
hydrostatic equilibrium at large radii.
We also showed in Section~\ref{sec:entropy} that
the gas has a negative entropy gradient beyond this radius, rendering it convectively unstable.
Therefore, if the temperature profile of Figure~\ref{fig:kt-0-600-fit} originates from
a single phase of the ICM, convection would transport hotter gas towards the outskirts, flattening
the temperature profile within a few Gyrs. Cooling of the gas by thermal radiation cannot be
responsible for off-setting the heating by convection, since the cooling time
($t_{cool} \sim kT^{1/2} n_e^{-1}$) is longer at the outskirts than in the peak-temperature regions
due to the lower density there.
\subsection{Warm-hot gas towards the cluster outskirts}
A possible interpretation for the detection of emission near the virial radius and its
steep temperature profile is the presence of a separate phase at the cluster outskirts
that is not in hydrostatic equilibrium with the cluster's potential.
In this case, cooler gas may be the result of infall from filamentary structures that
feed gas into the cluster, and the temperature of this \emph{warm-hot} gas may in fact be
lower than that shown in Figure~\ref{fig:kt-0-600-fit}
(i.e., $kT \sim 1.25$~keV for the region $\geq 450$") if
this gas lies in projection against the tail end of the hotter ICM.
We estimate the mass of this putative warm-hot gas assuming that all of the
emission from the outermost region is from a uniform density gas
seen in projection. This assumption may result in an overestimate
of the emission measure; in fact, the extrapolation of the gas density profile
in the hydrostatic or convective scenarios may yield a significant amount of
emission in the last radial bin.
We were unable to perform a self-consistent modelling
of the emission in the full radial range, since the low signal-to-noise
ratio does not allow a two-phase modelling in the last radial bin.
In this simple uniform density warm-hot gas scenario,
the gas is in a filamentary structure
of length $L$ and area $A=\pi(R_{out}^2-R_{in}^2)$, where
$R_{out}=600$" and $R_{in}=450"$; this is the same model
also considered in \cite{bonamente2005} for the cluster \emph{Abell~S1101}.
Since the length $L$ of the filament along the sightline is unknown,
we must either assume $L$ or the electron density $n_e$, and
estimate the mass implied by the detected emission.
The emission integral for this region is proportional to
\begin{equation}
K = \frac{10^{-14}}{4 \pi D_A^2 (1+z)^2} n_e^2 V,
\end{equation}
where $K$ is measured in XSPEC from a fit to the spectrum, $D_A$ is
the angular distance in cm, $z$ is the cluster redshift, and
the volume is $V=A \times L$. For this estimate we assume
for simplicity that
the mean atomic weights of hydrogen and of the electrons are
the same, $\mu_e=\mu_H$.
Using the best-fit spectral model with $kT=1.26\pm0.16$ keV,
we measure $K=1.05\pm 0.13 \times 10^{-4}$. If we assume a filament of
length $L=10$~Mpc, then the average density is $n_e=(2.4\pm0.3)\times 10^{-5}$~cm$^{-3}$,
and the filament mass is $4.6\pm0.6 \times 10^{13}$~$M_{\odot}$.
Alternatively, a more diffuse filament gas of $n_e=10^{-5}$~cm$^{-3}$
would require a filament of length $L=58\pm8$~Mpc, with
a mass of $1.1\pm0.2\times 10^{14}$~$M_{\odot}$, comparable to the
entire hot gas mass within $r_{200}$. The reason a lower-density gas
yields a higher mass is that, for a measured value of $K$,
we obtain $n_e \propto L^{-1/2}$, and therefore the mass is proportional to $L^{1/2}$.
For comparison, the gas mass for this shell inferred from the standard
analysis, i.e., assuming that the gas is in the shell itself,
is $\sim 3\times 10^{13}$~$M_{\odot}$, as can be also seen from Table~\ref{tab:vikh-masses}.
If the gas is cooler, then the mass budget would increase further.
In fact, the bulk of the emission from cooler gas falls outside of the \it Chandra\rm\ bandpass,
and for a fixed number of detected counts the required emission integral increases.
We illustrate this situation by fitting the annulus to an emission
model with a fixed value of $kT=0.5$~keV, which results in a value
of $K=(1.88\pm 0.24) \times 10^{-4}$ (the fit is significantly poorer, with
$\Delta \chi^2=+10$ for one fewer degree of freedom).
Accordingly, the filament mass estimates would be increased
approximately by a factor of two.
A warm-hot phase at $T\leq 10^7$~K is expected to be a significant reservoir of baryons
in the universe \citep[e.g.][]{cen1999,dave2001}. Using the \it ROSAT\rm\ soft X-ray
Position Sensitive Proportional Counter (PSPC) detector -- better suited to
detect the emission from sub-keV plasma -- we
have already reported
the detection of a large-scale halo of emission around the \emph{Coma} cluster out to $\sim$~5 Mpc, well beyond the
cluster's virial radius \citep{bonamente2003,bonamente2009}.
It is possible to speculate that the high mass of \emph{Abell~1835}, one
of the most luminous and massive clusters on the \emph{Bright Cluster Survey} sample \citep{ebeling1998},
is responsible for the heating of the infalling gas to temperatures that makes it
detectable by \it Chandra\rm, and that other massive clusters may therefore provide
evidence of emission to the virial radius with the \it Chandra\rm\ ACIS detectors.
The infall scenario is supported by the \emph{Herschel} observations
of \cite{pereira2010}, who measure a galaxy velocity distribution for \emph{Abell~1835}\
that does not appear to decline at large radii as in most of the other clusters
in their sample. A possible interpretation for their data is the presence of a
surrounding filamentary structure that is infalling into the cluster.
\subsection{Effects of gas clumping at large radii}
Masses and entropy measured in this paper assume that the gas has a uniform density
at each radius. To quantify the effect of departures from uniform density, we
define the clumping factor $C$
as the ratio of density averages over a large region,
\begin{align}
C & = \frac{\langle n_e^2 \rangle}{\langle n_e \rangle^2}
\end{align}
with $C \geqslant 1$.
Clumped gas emits more efficiently than gas of uniform
density,
and the same surface brightness $I$ results in a lower estimate for the gas density and mass,
\begin{equation}
I \propto \int \langle n_e^2 \rangle \, dl = \int \langle n_e \rangle^2 C \, dl,
\end{equation}
where $l$ is a distance along the sightline.
From Figure~\ref{fig:entropy-profile} we see that the entropy drop from
approximately 400" to 600" would be offset by a decrease in $n_e^{2/3}$ by a factor
of 3, or a decrease in $n_e$ by a factor of 5. We therefore suggest that a clumping
factor of $C \simeq 25$ at 600" would in principle be able to provide
a flat entropy profile, and even higher clumping factors would provide
an increasing entropy profile in better agreement with theory \citep[e.g.][]{voit2005,tozzi2001}.
Numerical simulations by \cite{nagai2011} suggest values of the clumping factor
$C \leq 3$ near $r_{200}$, with significantly higher clumping possible at larger radii.
Use of the \cite{nagai2011} model in the analysis of a large sample of galaxy clusters by \cite{eckert2012}
results in better agreement of observations with numerical simulations.
Clumping can also affect the measurement of hydrostatic masses.
In particular, gas with an increasing radial profile of the clumping factor
could result in a steeper gradient of the density profile, when compared with what is measured assuming
a uniform density. According to Equation~\ref{eq:hse}, this
would result in larger estimates of the hydrostatic mass, in principle able to reduce or entirely
offset the apparent decrease of $M(r)$ reported in Figure~\ref{fig:mass-0-600}.
We therefore conclude that a radial increase in the clumping of the gas can in principle
account for the apparent decrease of the mass profile and of the entropy profile
reported in this paper (Figures~\ref{fig:mass-0-600} and \ref{fig:entropy-profile}), and therefore
it is a viable scenario to interpret our \it Chandra\rm\ observations.
Clumping of the gas at large radii has also been suggested based on \it Suzaku\rm\ observations
\citep[e.g.,][]{simionescu2011}.
\section{Conclusions}
In this paper we have reported the detection of emission from \emph{Abell~1835}\ with \it Chandra\rm\
out to the cluster's virial radius. The cluster's surface brightness
is significantly above the background level out to a radius of
approximately 10 arcminutes, which corresponds to $\sim$2.4 Mpc at the
cluster's redshift. We have investigated several sources of systematic
errors in the background subtraction process, and determined that the
significance of the detection in the outer region (450-600") is
$\geq 4.7$~$\sigma$, and the emission cannot be explained
by fluctuations in the background. Detection out to the virial
radius is also implied by the \it XMM-Newton\rm\ temperature profile
reported by \cite{snowden2008}.
The \it Chandra\rm\ superior angular resolution made it straightforward to
identify and subtract sources of X-ray emission that are unrelated to the cluster.
In addition to a large number of point sources, we have identified X-ray emission
from two low-mass clusters that were selected from the SDSS data,
MAXBCG J210.31728+02.75364 \citep{koester2007}
and WHL J140031.8+025443 \citep{wen2009}.
The two clusters have photometric and spectroscopic redshifts that make them
likely associated with \emph{Abell~1835}. These are the only two
SDSS-selected clusters that are in the vicinity of \emph{Abell~1835}.
The outer regions of the \emph{Abell~1835}\ cluster have a sharp drop in the temperature
profile, a factor of about ten from the peak temperature. The sharp drop
in temperature implies that the hot gas cannot be in hydrostatic equilibrium, and
that the hot gas would be convectively unstable. A possible scenario to
explain the observations is the presence of \emph{warm-hot} gas
near the virial radius that is not in hydrostatic equilibrium with
the cluster's potential, and with a mass budget comparable to that
of the entire ICM. The data are also consistent with an alternative scenario
in which a significant clumping of the gas at large radii is responsible
for the apparent negative gradients of the mass and entropy profiles
at large radii.
\bibliographystyle{mn2e}
\bibliographystyle{apj}
\input{ms.bbl}
\label{lastpage}
\end{document}
|
{
"timestamp": "2012-10-17T02:13:14",
"yymm": "1206",
"arxiv_id": "1206.6067",
"language": "en",
"url": "https://arxiv.org/abs/1206.6067"
}
|
\section{Introduction}
\label{sec:Intro}
The notion of entanglement \cite{BengtssonZyczkowski,Horodecki4}
was regarded by Schr\"odinger \cite{Schrodinger,Schrodinger2} to be the characteristic trait of quantum mechanics.
It serves as a resource for Quantum Information Theory \cite{NielsenChuang},
a relatively new field of research
dealing with the properties, characterization and applications
(mostly in quantum computation \cite{NielsenChuang})
of the nonlocal behavior of entangled quantum states.
For a multipartite quantum system being in a \emph{pure state},
it is easy to decide, in general,
which subsystems are entangled with some of the others or, equivalently,
which subsystems can be separated from the others.
For a multipartite quantum system being in a \emph{mixed state},
however, this partial separability problem has not been considered in full detail yet.
This problem is twofold.
Even if we have the \emph{definitions of the different classes},
which is not self-evident at all for more-than-two-partite systems,
\emph{deciding to which class a given state belongs} is also a nontrivial task.
In this paper, we work out solutions for both parts of this problem.
Considering the \emph{first part of this problem},
we extend the classification based on $k$-separability and $\alpha_k$-separability
given by Seevinck and Uffink \cite{SeevinckUffinkMixSep},
which is the extension of the classification dealing only with $\alpha_k$-separability
given by D\"ur and Cirac \cite{DurCiracTarrach3QBMixSep,DurCiracTarrachBMixSep}.
We discuss in detail the tripartite case,
then give the definitions for systems of arbitrary number of subsystems.
Before we outline our solution for the \emph{second part of the problem},
we take a short detour.
If in the \emph{tripartite} case we restrict ourselves to \emph{qubits},
which is a relatively well-understood chapter of the theory of quantum entanglement,
some interesting results are known from the literature.
From the point of view of the present work, the most important ones are the following three.
First,
\refstepcounter{txtitem}\thetxtitem{} this is the system where the nontrivial structure of entanglement manifested itself for the first time
and it came to light that ``there are different kinds of entanglement'' of \emph{pure states} \cite{DurVidalCiracSLOCC3QB}.
Then, \refstepcounter{txtitem}\thetxtitem{} these different kinds of pure-state entanglement \cite{DurVidalCiracSLOCC3QB}
give rise to classes of mixed state entanglement \cite{Acinetal3QBMixClass}.
On the other hand,
\refstepcounter{txtitem}\thetxtitem{} \label{text:FTS} recently a beautiful correspondence was found between the three-qubit Hilbert space and a particular FTS (Freudenthal Triple System),
a correspondence which is ``compatible'' with the entanglement of pure three-qubit states \cite{BorstenetalFreudenthal3QBEnt}.
Apart from these three, for the sake of completeness, we have to make mention of
\refstepcounter{txtitem}\thetxtitem{} the famous phenomenon of monogamy of qubit systems, which was revealed first for three-qubit systems \cite{CKWThreetangle}
then shown for multiqubit systems \cite{OsborneVerstraeteMonogamy},
and \refstepcounter{txtitem}\thetxtitem{} the interesting twistor-geometric approach of the entanglement of three-qubit systems \cite{PeterGeom3QBEnt}.
Item \ref{text:FTS} above gives us a hint of an answer to \emph{the second part of the problem} for three-qubit systems.
In the FTS approach of three-qubit entanglement some special quantities have appeared,
from which we gain real valued functions on pure states.
These functions have very useful vanishing properties,
which enable
their convex roof extensions \cite{BennettetalMixedStates,UhlmannFidelityConcurrence,UhlmannConvRoofs}
to identify all the classes that our extended classification deals with.
On the other hand, it will be possible to define suitable functions
for the identification of the classes in general,
for subsystems of arbitrary dimensions---moreover, for arbitrary number of subsystems---in another way
than was done with the FTS approach working only for three qubits.
However, we will keep the considerations coming from the FTS approach,
because these considerations have given us the main ideas,
they have advantages for the case of three-qubits,
and, besides these, they are beautiful and interesting in themselves.
In the bipartite case, a state---either pure or mixed---can be either separable or entangled \cite{WernerSep},
and the vanishing of the convex roof extension of local entropies of pure states
is a \emph{necessary and sufficient criterion} of separability.
For us, this is the archetype of the general method of the detection of convex subsets by convex roof extensions.
However, for more-than-two-partite systems,
the partial separability properties have a complicated structure,
and, to our knowledge, this method was not used.
Instead of that,
the usual approach was the use of witness operators, as was done originally for three-qubit systems \cite{Acinetal3QBMixClass},
or other \emph{necessary but not sufficient criteria} for the detection of convex subsets \cite{Horodecki4,BengtssonZyczkowski,SzalaySepCrit,GuhneTothEntDet}.
Before starting,
we review the classification schemes of states of multipartite quantum systems.
One of the main concepts here was the use of LOCC
(Local Operations assisted by Classical Communication
\cite{BennettetalEquivalences})
either with certainty or with possibility,
for the purpose of classification.
This concept has turned out to be useful in the restricted case when the input and output states are both pure.
First we recall the classification schemes dealing with LOCC.
For mixed states, only coarse-grained classifications are worked out,
which are recalled as well.
\emph{LOCC classification:}
Two states are equivalent under LOCC---they are in the same LOCC class---by definition
if they can be transformed to each other \emph{with certainty} by the use of LOCC.
For pure states, it turned out that
two states are equivalent under LOCC
if and only if they can be transformed into each other by LU (Local Unitary) transformations \cite{BennettetalEquivalences}.
So, for pure states, this gives the most fine-grained classification scheme imaginable.
Many continuous and discrete parameters are required to label the LOCC classes
\cite{LindenPopescuOnMultipartEnt,AcinetalGenSchmidt3QB,Acinetal3QBPureCanon,Sudbery3qb,Kempe3qb}.
From the point of view of quantum computational purposes,
two LOCC-equivalent pure states can be used for exactly the same task.
However, to our knowledge,
there is no such practical criterion of LOCC equivalence for mixed states
as the LU equivalence was for pure states.
\emph{SLOCC classification:}
A coarse-grained classification can be defined if we demand only the possibility of the transformation.
Two states are equivalent under SLOCC (Stochastic LOCC)---they are in the same SLOCC class---by definition
if they can be transformed into each other \emph{with non-zero probability} by the use of LOCC.
For pure states, it turned out that
two states are equivalent under SLOCC
if and only if they can be transformed into each other by LGL (Local General Linear) transformations \cite{DurVidalCiracSLOCC3QB}.
(Sometimes that was called ILO, stands for Invertible Local Operation \cite{DurVidalCiracSLOCC3QB},
but we prefer the uniform naming after the corresponding Lie groups.)
So this gives a coarse-grained classification scheme for pure states.
In some cases, including the three-qubit case, only finitely many SLOCC classes arise \cite{DurVidalCiracSLOCC3QB}.
From the point of view of quantum computational purposes,
two SLOCC-equivalent pure states can be used for the same task but with a different probability of success.
Again, to our knowledge,
there is no such practical criterion of SLOCC equivalence for mixed states
as the LGL equivalence was for pure states.
\emph{PS classification (Partial Separability):}
A more coarse-grained classification involves only the partial-separability properties.
This works for both pure and mixed states and gives only finitely many classes in both cases.
We elaborate this classification in detail in this paper for mixed states.
This classification deals with \emph{all the possible kinds of partial separability,}
which are of finite number,
whose special cases are the subsets of $k$-separability and $\alpha_k$-separability \cite{SeevinckUffinkMixSep}.
From the point of view of quantum computational purposes,
however, this classification is a bit too coarse grained,
since it does not make distinction among
pure states contained in
different SLOCC classes but having the same PS properties,
although these states may be suitable for different tasks.
\emph{PSS classification (Partial Separability extended by pure-state SLOCC classes):}
A cure for the problem above is another means of classification,
which was given by Ac\'in \textit{et~al.}~\cite{Acinetal3QBMixClass}
only for three-qubit states.
Here, the starting point is the \emph{pure}-state SLOCC classes which are of finite number,
and the only difference between the partial separability classes and SLOCC classes
is the split of the three-qubit entangled class into two classes \cite{DurVidalCiracSLOCC3QB}.
The PSS classes arising from these classes for \emph{mixed} states
are the same for biseparability, and only the tripartite entangled set is divided into two classes.
This classification has the advantage of differentiating among different SLOCC classes of pure states,
and also among mixed states depending on which kind of pure entanglement is needed for the preparation of the state.
However, in the majority of the cases there are continuously infinite SLOCC classes of pure states
labeled by more than one continuous parameter \cite{DurVidalCiracSLOCC3QB,VerstraeteetalSLOCC4QB,ChterentalDjokovicSLOCC4QB},
in which case it is not clear how this classification can be carried out,
if it can be at all.
The organization of this paper is as follows.
In the first half of the paper,
we work out the main concepts on three-qubit states.
In Sec.~\ref{sec:Pure}, we review the SLOCC classification of pure three-qubit states.
We recall the conventional LU invariants (in Sec.~\ref{subsec:Pure:ClassLU})
and the LSL tensors (Local Special Linear) of the FTS approach (in Sec.~\ref{subsec:Pure:ClassLSL})
by which the SLOCC classes can be identified.
Then we obtain a new set of LU invariants (in Sec.~\ref{subsec:Pure:NewInvs})
being necessary later for mixed states.
In Sec.~\ref{sec:Mixed}, we elaborate the
PSS classification (which contains also the PS classification)
for mixed three-qubit states.
We define the PS(S) subsets (in Sec.~\ref{subsec:Mixed:Subsets})
and PS(S) classes (in Sec.~\ref{subsec:Mixed:Classes}).
Then we give the functions for the identification of the PS(S) classes
(in Sec.~\ref{subsec:Mixed:CRoof}).
In Sec.~\ref{sec:Xmpl}, we demonstrate the nonemptiness of some of the new classes
for the three-qubit case
by explicit examples.
In Sec.~\ref{sec:GenThreePart}, we generalize the functions for the case of three subsystems of arbitrary dimensions.
First we see how far the method coming from the FTS approach can go (in Sec.~\ref{subsec:GenThreePart:FTS});
then we formulate a more general set of functions working without limitations (in Sec.~\ref{subsec:GenThreePart:nFTS}).
In Sec.~\ref{sec:Gen}, we generalize the construction for the case of arbitrary number of subsystems of arbitrary dimensions.
We work out the labeling of the PS subsets (in Sec.~\ref{subsec:Gen:PSsubsets})
along with the PS classes
and give a general conjecture about their nonemptiness (in Sec.~\ref{subsec:Gen:PSclasses}).
Then we construct
the functions identifying the PS subsets and classes
with the minimal requirements (in Sec.~\ref{subsec:Gen:Indicators}),
as well as with stronger requirements leading to entanglement-monotone functions
(in Sec.~\ref{subsec:Gen:monIndicators}).
In Sec.~\ref{sec:Sum}, we give a summary, some remarks, and open questions.
Some technicalities about the new set of three-qubit LU invariants
and proofs of some statements about the general construction
are left to Appendixes \ref{appsec:explicit}
and \ref{appsec:Gen}.
\section{Pure three-qubit states}
\label{sec:Pure}
Before starting,
we set some conventions that are very convenient for the tripartite case.
The labels of the subsystems are the numbers $1$, $2$, and $3$,
while the letters $a$, $b$, and $c$ are variables
taking their values in the set of labels $\{1,2,3\}$.
When $a$, $b$, and $c$ appear together in a formula,
they form a partition of $\{1,2,3\}$,
so they always take different values,
and \emph{the formula is understood for all the different values of these variables automatically.}
(However, sometimes a formula is symmetric under the interchange of two such variables
in which case we keep only one of the identical formulas.)
The Hilbert space of a three-qubit system is
$\mathcal{H}=\mathcal{H}^1\otimes\mathcal{H}^2\otimes\mathcal{H}^3$,
where, after the choice of an orthonormal basis $\{\cket{0},\cket{1}\}\subset\mathcal{H}^a$, $\mathcal{H}^a\cong\field{C}^2$.
The $\cket{\psi}\in\mathcal{H}$ state vectors
are not required to be normalized in this section,
so the $0\in\mathcal{H}$ zero vector is also allowed.
(The physical states arise, however, from normalized vectors.)
\subsection{SLOCC classification by LU-invariants}
\label{subsec:Pure:ClassLU}
It is a well-known and celebrated result of D\"ur, Vidal, and Cirac \cite{DurVidalCiracSLOCC3QB} that
``three qubits can be entangled in two inequivalent ways.''
More fully, there are $1+1+3+1+1$ three-qubit SLOCC classes,
that is, subsets invariant under LGL transformations.
\begin{itemize}
\item $\mathcal{V}^\text{Null}$ (class Null): The zero-vector of $\mathcal{H}$.
\item $\mathcal{V}^{1|2|3}$ (class $1|2|3$): These vectors are fully separable, which are of the form
$\cket{\psi_1}\otimes\cket{\psi_2}\otimes\cket{\psi_3}$.
\item $\mathcal{V}^{a|bc}$ (three biseparable classes $a|bc$),
for example,
$\cket{\psi_1}\otimes\cket{\psi_{23}}\in\mathcal{V}^{1|23}$, where
$\cket{\psi_{23}}\neq \cket{\psi_2}\otimes\cket{\psi_3}$.
For such $\cket{\psi_{23}}$, a representative element is the standard B (Bell) state,
\begin{subequations}
\begin{equation}
\label{eq:B}
\cket{\text{B}}=\frac{1}{\sqrt{2}}\bigl(\cket{00}+\cket{11}\bigr).
\end{equation}
\item $\mathcal{V}^\text{W}$ (Class W):
This is the first class of genuine tripartite entanglement,
when no subsystem can be separated from the others.
A representative element is the standard W state,
\begin{equation}
\label{eq:W}
\cket{\text{W}}=\frac{1}{\sqrt{3}}\bigl(\cket{100}+\cket{010}+\cket{001}\bigr).
\end{equation}
\item $\mathcal{V}^\text{GHZ}$ (Class GHZ):
This is the second class of genuine tripartite entanglement,
the class of Greenberger-Horne-Zeilinger-type entanglement.
A representative element is the standard GHZ state,
\begin{equation}
\label{eq:GHZ}
\cket{\text{GHZ}}=\frac{1}{\sqrt{2}}\bigl(\cket{000}+\cket{111}\bigr).
\end{equation}
\end{subequations}
\end{itemize}
Formally speaking,
these classes define disjoint, LGL-invariant subsets of $\mathcal{H}$,
and cover $\mathcal{H}$ entirely:
$\mathcal{H}=
\mathcal{V}^\text{Null}
\cup\mathcal{V}^{1|2|3}
\cup\mathcal{V}^{1|23}
\cup\mathcal{V}^{2|13}
\cup\mathcal{V}^{3|12}
\cup\mathcal{V}^\text{W}
\cup\mathcal{V}^\text{GHZ}$.
Except $\mathcal{V}^\text{Null}$, these classes are not closed.
For the partial separability issues, we define
$\mathcal{V}^{123}=\mathcal{V}^\text{W}\cup\mathcal{V}^\text{GHZ}$.
For any $\cket{\psi}\in\mathcal{H}$,
it can be determined to which class $\cket{\psi}$ belongs
by the vanishing of the following quantities:
the norm,
\begin{subequations}
\label{eq:pureLUinvs}
\begin{equation}
\label{eq:pureLUinvs:n}
n(\psi)= \norm{\psi}^2,
\end{equation}
the local entropies,
\begin{equation}
\label{eq:pureLUinvs:sa}
s_a(\psi)= 4 \det \bigl[\tr_{bc} \bigl(\cket{\psi}\bra{\psi}\bigr)\bigr],
\end{equation}
[here we use a normalized quantum-Tsallis entropy of parameter $2$
(see in Sec.~\ref{subsec:GenThreePart:FTS}),
although every entropy does the job,
since they vanish only for pure density matrices]
and the three-tangle,
\begin{equation}
\label{eq:pureLUinvs:tau}
\tau(\psi)=4\abs{\Det(\psi)},
\end{equation}
\end{subequations}
which is given by Cayley's hyperdeterminant $\Det(\psi)$
\cite{CayleyHDet,GelfandetalDiscriminants,CKWThreetangle}.
All of these quantities are LU invariants
[the LU group is $\LieGrp{U}(2)^{\times3}$ in this case];
moreover, $n$ is invariant under the larger group $\LieGrp{U}(8)$
and $\tau$ under $\bigl[\LieGrp{U}(1)\times\LieGrp{SL}(2,\field{C})\bigr]^{\times3}
\cong\LieGrp{U}(1)\times\LieGrp{SL}(2,\field{C})^{\times3}$.
It follows from the invariance properties and other observations \cite{DurVidalCiracSLOCC3QB}
that the SLOCC classes of pure three-qubit states
can be determined by the vanishing of these quantities
in the way which can be seen in Table \ref{tab:pureSLOCC}.
\begin{table}
\begin{tabular}{c||c|ccc|c}
Class & $n(\psi)$ & $s_1(\psi)$ & $s_2(\psi)$ & $s_3(\psi)$ & $\tau(\psi)$ \\
\hline
\hline
$\mathcal{V}^\text{Null}$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
\hline
$\mathcal{V}^{1|2|3}$ & $>0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
\hline
$\mathcal{V}^{1|23}$ & $>0$ & $=0$ & $>0$ & $>0$ & $=0$ \\
$\mathcal{V}^{2|13}$ & $>0$ & $>0$ & $=0$ & $>0$ & $=0$ \\
$\mathcal{V}^{3|12}$ & $>0$ & $>0$ & $>0$ & $=0$ & $=0$ \\
\hline
$\mathcal{V}^\text{W}$ & $>0$ & $>0$ & $>0$ & $>0$ & $=0$ \\
$\mathcal{V}^\text{GHZ}$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$
\end{tabular}
\caption{SLOCC classes of three-qubit state vectors
identified by the vanishing of LU-invariants (\ref{eq:pureLUinvs}).}
\label{tab:pureSLOCC}
\end{table}
Our aim is the characterization of the \emph{mixed states}
by the vanishing of some quantities,
in a similar way
that the conditions in Table \ref{tab:pureSLOCC} for the quantities in (\ref{eq:pureLUinvs}) characterize the \emph{pure states.}
To obtain such a characterization scheme,
we need,
on the one hand, the generalization of classes defined somehow,
and, on the other hand, a suitable set of quantities which
are vanishing for some classes determined somehow and nonvanishing for the others.
These two issues are strongly related,
and it will turn out that
we can define a set of quantities which suits well the classification given by Seevinck and Uffink \cite{SeevinckUffinkMixSep},
but a ``more complete'' set of quantities suits well an extended but still relevant classification;
both classifications are elaborated in Sec.~\ref{sec:Mixed}.
\subsection{SLOCC classification by LSL-covariants}
\label{subsec:Pure:ClassLSL}
In \cite{BorstenetalFreudenthal3QBEnt},
Borsten \textit{et~al.}\ revealed a very elegant correspondence
between
the three-qubit Hilbert space $\mathcal{H}\cong\field{C}^2\otimes\field{C}^2\otimes\field{C}^2$
and
the FTS (Freudenthal Triple System) $\mathfrak{M}(\mathcal{J})\cong\field{C}\oplus\field{C}\oplus\mathcal{J}\oplus\mathcal{J}$
over the cubic Jordan algebra $\mathcal{J}\cong\field{C}\oplus\field{C}\oplus\field{C}$.
The fundamental point of this correspondence is
that the automorphism group of this FTS is
$\LieGrp{Aut}[\mathfrak{M}(\field{C}\oplus\field{C}\oplus\field{C})]=\LieGrp{SL}(2,\field{C})^{\times3}$,
which is just the relevant LSL subgroup
of $\LieGrp{GL}(2,\field{C})^{\times3}$, the LGL-group of SLOCC equivalence for three-qubit pure states.
(This group-theoretical coincidence arises only in the three-qubit case.)
It has been shown \cite{BorstenetalFreudenthal3QBEnt} that
the vectors of \emph{different SLOCC classes of entanglement} in the three-qubit Hilbert space
are in one-to-one correspondence with
the elements of \emph{different rank} in the FTS.
The rank of an element of an FTS
is characterized by the vanishing of some associated elements,
which are covariant---maybe invariant---under the action of the automorphism group,
resulting in conditions for the SLOCC classes in the Hilbert space by the vanishing or nonvanishing of
$\LieGrp{SL}(2,\field{C})^{\times3}$ tensors.
Hence, this classification is manifestly invariant under SLOCC equivalence \cite{BorstenetalFreudenthal3QBEnt},
which cannot be seen directly in the conventional classification,
since the $s_a$ local entropies are scalars only under $\LieGrp{U}(2)^{\times3}$.
(However, the invariance of the vanishing of the functions $s_a$ follows easily from the fact
that the local rank is invariant under invertible transformations \cite{DurVidalCiracSLOCC3QB}.)
Let the three-qubit state $\cket{\psi}\in\mathcal{H}$ be expressed
in the computational basis $\{\cket{ijk}=\cket{i}\otimes\cket{j}\otimes\cket{k}\}$
as
\begin{equation*}
\cket{\psi}=\sum_{i,j,k=0}^1\psi^{ijk}\cket{ijk}.
\end{equation*}
We can assign an element $\psi\in\mathfrak{M}(\mathcal{\field{C}\oplus\field{C}\oplus\field{C}})$ to this
and calculate some associated quantities
needed for the identification of its rank.
Here we list these quantities
in the form in which we use them:
\begin{subequations}
\label{eq:tensors}
\begin{align}
\label{eq:tensors:Upsilon}
\begin{split}
[\Upsilon_\phi(\psi)]^{ijk} =
&-\varepsilon_{ll'}\varepsilon_{mm'}\varepsilon_{nn'}\psi^{imn}\psi^{lm'n'}\phi^{l'jk}\\
&-\varepsilon_{mm'}\varepsilon_{nn'}\varepsilon_{ll'}\psi^{ljn}\psi^{l'mn'}\phi^{im'k}\\
&-\varepsilon_{nn'}\varepsilon_{ll'}\varepsilon_{mm'}\psi^{lmk}\psi^{l'm'n}\phi^{ijn'},
\end{split}\\
\label{eq:tensors:gamma1}
[\gamma_1(\psi)]^{ii'}=&\; \varepsilon_{jj'}\varepsilon_{kk'}\psi^{ijk}\psi^{i'j'k'},\\
\label{eq:tensors:gamma2}
[\gamma_2(\psi)]^{jj'}=&\; \varepsilon_{kk'}\varepsilon_{ii'}\psi^{ijk}\psi^{i'j'k'},\\
\label{eq:tensors:gamma3}
[\gamma_3(\psi)]^{kk'}=&\; \varepsilon_{ii'}\varepsilon_{jj'}\psi^{ijk}\psi^{i'j'k'},\\
\label{eq:tensors:T}
\begin{split}
[T(\psi,\psi,\psi)]^{ijk}
=&-\varepsilon_{ll'}\varepsilon_{mm'}\varepsilon_{nn'}\psi^{imn}\psi^{lm'n'}\psi^{l'jk}\\
=&-\varepsilon_{mm'}\varepsilon_{nn'}\varepsilon_{ll'}\psi^{ljn}\psi^{l'mn'}\psi^{im'k}\\
=&-\varepsilon_{nn'}\varepsilon_{ll'}\varepsilon_{mm'}\psi^{lmk}\psi^{l'm'n}\psi^{ijn'},
\end{split}\\
\begin{split}
\label{eq:tensors:q}
q(\psi) =&\;
\varepsilon_{ii'}\varepsilon_{jj'}
\varepsilon_{kk'}\varepsilon_{ll'}
\varepsilon_{mm'}\varepsilon_{nn'}\\
&\qquad\times\psi^{ikl}\psi^{jk'l'}\psi^{i'mn}\psi^{j'm'n'}.
\end{split}
\end{align}
\end{subequations}
(For the basic definitions
of Jordan algebras, Freudenthal triple systems
and the operations and maps defined on them,
see in \cite{BorstenetalFreudenthal3QBEnt} and in the references therein.)
Here the summation for the pairs of indices occurring upstairs and downstairs
are understood, and
\begin{equation*}
\varepsilon_{ii'}=\begin{bmatrix}
0&1\\
-1&0
\end{bmatrix}
\end{equation*}
is the matrix of the
$\LieGrp{Sp}(1)\cong\LieGrp{SL}(2)$-invariant non-degenerate antisymmetric bilinear form:
Since $\transp{M}\varepsilon M= \varepsilon \det(M)$,
index contraction by $\varepsilon$ is invariant under $\LieGrp{SL}(2,\field{C})$ transformations.
This shows that
if we regard $\psi$ and $\phi$ as tensors that
transform as a $(\mathbf{2},\mathbf{2},\mathbf{2})$
under $\LieGrp{SL}(2,\field{C})^{\times3}$,
then so do $\Upsilon_\phi(\psi)$ and $T(\psi,\psi,\psi)$,
while $\gamma_1(\psi)$, $\gamma_2(\psi)$, and $\gamma_3(\psi)$, being symmetric, transform as
$(\mathbf{3},\mathbf{1},\mathbf{1})$,
$(\mathbf{1},\mathbf{3},\mathbf{1})$, and
$(\mathbf{1},\mathbf{1},\mathbf{3})$, respectively,
and $q(\psi)$ transforms as $(\mathbf{1},\mathbf{1},\mathbf{1})$;
that is, it is scalar.
[Note that for any $2\times2$ matrix $M$, the determinant satisfies
$2\det(M)=\varepsilon_{ii'}\varepsilon_{jj'}M^{ij}M^{i'j'}$,
so $2\det[\gamma_a(\psi)]=q(\psi)$.]
The main result of \cite{BorstenetalFreudenthal3QBEnt} is
that the conditions for the SLOCC classes can be formulated by the vanishing of these tensors
in the way which can be seen in Table \ref{tab:pureSLOCC2}.
\begin{table}
\begin{tabular}{c||c|c|ccc|c|c}
Class & $\psi$ & $\Upsilon_\phi(\psi)$ & $\gamma_1(\psi)$ & $\gamma_2(\psi)$ & $\gamma_3(\psi)$ & $T(\psi,\psi,\psi)$ & $q(\psi)$ \\
\hline
\hline
$\mathcal{V}^\text{Null}$ & $=0$ & $=0,\forall\phi$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
\hline
$\mathcal{V}^{1|2|3}$ & $\neq0$ & $=0,\forall\phi$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
\hline
$\mathcal{V}^{1|23}$ & $\neq0$ & $\neq0,\exists\phi$ & $\neq0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
$\mathcal{V}^{2|13}$ & $\neq0$ & $\neq0,\exists\phi$ & $=0$ & $\neq0$ & $=0$ & $=0$ & $=0$ \\
$\mathcal{V}^{3|12}$ & $\neq0$ & $\neq0,\exists\phi$ & $=0$ & $=0$ & $\neq0$ & $=0$ & $=0$ \\
\hline
$\mathcal{V}^\text{W}$ & $\neq0$ & $\neq0,\exists\phi$ & $\neq0$ & $\neq0$ & $\neq0$ & $\neq0$ & $=0$ \\
$\mathcal{V}^\text{GHZ}$ & $\neq0$ & $\neq0,\exists\phi$ & $\neq0$ & $\neq0$ & $\neq0$ & $\neq0$ & $\neq0$
\end{tabular}
\caption{SLOCC classes of three-qubit state vectors
identified by the vanishing of LSL-covariants (\ref{eq:tensors}).}
\label{tab:pureSLOCC2}
\end{table}
In the light of the conditions given by the norm and the four determinants (see in Table \ref{tab:pureSLOCC}),
this scheme constructed by seven quantities seems to be redundant.
Indeed, it is redundant for pure states,
but it will turn out that this way leads to the generalization for mixed states.
\subsection{SLOCC classification by a new set of LU-invariants}
\label{subsec:Pure:NewInvs}
To follow this way,
we need quantities which can be extended from pure states to mixed states
by the convex roof construction \cite{BennettetalMixedStates,UhlmannFidelityConcurrence,UhlmannConvRoofs}.
There is no natural ordering on the tensors of (\ref{eq:tensors}),
so the convex roof construction does not work directly for them,
but we can form quantities from them taking values in the field of real numbers.
During this,
we lose the covariance under $\LieGrp{SL}(2,\field{C})^{\times3}$,
but gain the invariance under the group $\LieGrp{U}(2)^{\times3}$.
Returning from the FTS language to the Hilbert space language,
we have ``state vectors''
\begin{align*}
\cket{\Upsilon_\phi(\psi)}&=\sum_{i,j,k=0}^1[\Upsilon_\phi(\psi)]^{ijk}\cket{ijk}\in\mathcal{H},\\
\cket{T(\psi,\psi,\psi)}&=\sum_{i,j,k=0}^1[T(\psi,\psi,\psi)]^{ijk}\cket{ijk}\in\mathcal{H},
\end{align*}
and ``local operators''
\begin{align*}
\gamma_1(\psi)\varepsilon&=\sum_{i,i'=0}^1[\gamma_1(\psi)\varepsilon]^i_{\;i'}\cket{i}\bra{i'}\in\Lin(\mathcal{H}^1),\\
\gamma_2(\psi)\varepsilon&=\sum_{j,j'=0}^1[\gamma_2(\psi)\varepsilon]^j_{\;j'}\cket{j}\bra{j'}\in\Lin(\mathcal{H}^2),\\
\gamma_3(\psi)\varepsilon&=\sum_{k,k'=0}^1[\gamma_3(\psi)\varepsilon]^k_{\;k'}\cket{k}\bra{k'}\in\Lin(\mathcal{H}^3),
\end{align*}
associated with $\cket{\psi}\in\mathcal{H}$ through (\ref{eq:tensors}).
These are just computational auxiliaries,
not state vectors and local operators in the ordinary sense,
because they depend nonlinearly on the state vector $\cket{\psi}$.
[Note that
$\varepsilon \in \mathcal{H}^{a*}\otimes\mathcal{H}^{a*}
\cong\Lin(\mathcal{H}^a\to\mathcal{H}^{a*})
\cong\BiLin(\mathcal{H}^a\times\mathcal{H}^a\to\field{C})$,
while
$\gamma_a(\psi)\in \mathcal{H}^a\otimes\mathcal{H}^a
\cong\Lin(\mathcal{H}^{a*}\to\mathcal{H}^a)$,
so $\gamma_a(\psi)\varepsilon\in\Lin(\mathcal{H}^a\to\mathcal{H}^a)$.]
Now, the vanishing conditions of the tensors (\ref{eq:tensors}) in Table \ref{tab:pureSLOCC2}
can be reformulated.
Clearly, $\psi=0$ if and only if $\norm{\psi}^2=0$.
Taking a look at $\Upsilon_\phi(\psi)$ in (\ref{eq:tensors:Upsilon}) it turns out that
$\Upsilon_\phi(\psi)$ can be written in the Hilbert space language as
\begin{equation*}
\cket{\Upsilon_\phi(\psi)} = Y(\psi) \cket{\phi}
\end{equation*}
with the ``operator''
\begin{equation*}
Y(\psi) =
-\gamma_1(\psi)\varepsilon\otimes\mathrm{I}\otimes\mathrm{I}
-\mathrm{I}\otimes\gamma_2(\psi)\varepsilon\otimes\mathrm{I}
-\mathrm{I}\otimes\mathrm{I}\otimes\gamma_3(\psi)\varepsilon.
\end{equation*}
Using this,
the vanishing condition of $\Upsilon_\phi(\psi)$ for all $\phi$,
\begin{equation*}
\begin{split}
\cket{\Upsilon_\phi(\psi)}=0\;\;\forall\cket{\phi}\quad
&\Longleftrightarrow\quad Y(\psi) \cket{\phi}=0\;\;\forall\cket{\phi}\\
&\Longleftrightarrow\quad Y(\psi)=0\\
&\Longleftrightarrow\quad \norm{Y(\psi)}^2=0 \;\; \text{for any norm},
\end{split}
\end{equation*}
so we can eliminate the quantifiers and $\phi$ from the condition.
Using the usual complex matrix $2$-norm $\norm{M}^2=\tr(M^\dagger M)$,
we have
\begin{equation*}
\norm{Y(\psi)}^2=4\bigl(\norm{\gamma_1(\psi)}^2 + \norm{\gamma_2(\psi)}^2 + \norm{\gamma_3(\psi)}^2\bigr).
\end{equation*}
This formula has a remarkable structure, namely
if we note that $s_a(\psi)=\norm{\gamma_b(\psi)}^2 + \norm{\gamma_c(\psi)}^2$
and $\gamma_a(\psi)=0$ if and only if $\norm{\gamma_a(\psi)}^2=0$.
Now turn to the vanishing of $T(\psi,\psi,\psi)$, given in (\ref{eq:tensors:T}).
Again, this vanishes if and only if its norm $\norm{T(\psi,\psi,\psi)}^2$ does.
This can be calculated by the use of the form
\begin{equation*}
\begin{split}
\cket{T(\psi,\psi,\psi)}
&= -\gamma_1(\psi)\varepsilon\otimes\mathrm{I}\otimes\mathrm{I} \cket{\psi}\\
&= -\mathrm{I}\otimes\gamma_2(\psi)\varepsilon\otimes\mathrm{I} \cket{\psi}\\
&= -\mathrm{I}\otimes\mathrm{I}\otimes\gamma_3(\psi)\varepsilon \cket{\psi}\\
&= \frac13 Y(\psi) \cket{\psi}.
\end{split}
\end{equation*}
(The quantity $\norm{T(\psi,\psi,\psi)}^2$ also appears in the
twistor-geometric approach to three-qubit entanglement;
it is proportional to $\omega_{\text{ABC}}$ in \cite{PeterGeom3QBEnt}.)
About the scalar $q$, note that $q(\psi)=-2\Det(\psi)$ \cite{CKWThreetangle},
and it vanishes if and only if the three-tangle (\ref{eq:pureLUinvs:tau}) does.
Summarizing the observations above,
it is useful to define the following set of real-valued functions on $\mathcal{H}$:
\begin{subequations}
\label{eq:newPureLUinvs}
\begin{align}
n(\psi) &= \norm{\psi}^2,\\
\label{eq:newPureLUinvs:y}
y(\psi) &= \frac23\bigl(g_1(\psi) + g_2(\psi) + g_3(\psi)\bigr),\\
\label{eq:newPureLUinvs:sa}
s_a(\psi) &= g_b(\psi) + g_c(\psi),\\
\label{eq:newPureLUinvs:ga}
g_a(\psi) &= \norm{\gamma_a(\psi)}^2,\\
\label{eq:newPureLUinvs:t}
t(\psi) &= 4\norm{T(\psi,\psi,\psi)}^2,\\
\label{eq:newPureLUinvs:tau2}
\tau^2(\psi)&= 4\abs{q(\psi)}^2.
\end{align}
\end{subequations}
[The explicit forms of these functions
and their relations to other important quantities
can be found in the Appendixes
\ref{appsubsec:explicit:stdLU}, \ref{appsubsec:explicit:LUcanon}, and \ref{appsubsec:explicit:WoottersConc}.
The constant factors have been chosen so that
$0\leq y(\psi),s_a(\psi),g_a(\psi),t(\psi),\tau^2(\psi) \leq1$ for normalized states,
which is shown in Appendix \ref{appsubsec:explicit:ranges}.]
These quantities are obtained by index contraction of $\psi^{ijk}$s and complex conjugated $\cc{(\psi^{i'j'k'})}$s by $\delta_{ii'}$s
from the tensors in (\ref{eq:tensors}), which were obtained by index contraction of $\psi^{ijk}$s and $\psi^{i'j'k'}$s by $\varepsilon_{ii'}$s.
From the contractions of free indices of the tensors in (\ref{eq:tensors}),
we have $U^\dagger\delta U= \delta$ for $U\in\LieGrp{U}(2)$.
From the contractions inside the tensors of (\ref{eq:tensors}),
we have $\transp{U}\varepsilon U= \varepsilon \det(U)$
but for every factor $\det(U)$ there is a conjugated $\cc{\det(U)}=1/\det(U)$
from $U^\dagger\varepsilon \cc{U}= \varepsilon \cc{\det(U)}$.
Consequently, all the functions in (\ref{eq:newPureLUinvs}) are LU invariant,
while their vanishings are still LSL invariant.
(Again, $n$ is invariant under the larger group $\LieGrp{U}(8)$,
and $\tau^2$ under $\left[\LieGrp{U}(1)\times\LieGrp{SL}(2,\field{C})\right]^{\times3}$.)
Now, the conditions for the SLOCC classes
by the vanishing of the tensors in (\ref{eq:tensors})
(see in Table \ref{tab:pureSLOCC2})
can be reformulated
by the vanishing of the functions in (\ref{eq:newPureLUinvs})
in the way which can be seen in Table \ref{tab:pureSLOCC3}.
\begin{table*}
\begin{tabular}{c||c|c|ccc|ccc|c|c}
Class & $n(\psi)$ & $y(\psi)$ & $s_1(\psi)$ & $s_2(\psi)$ & $s_3(\psi)$ & $g_1(\psi)$ & $g_2(\psi)$ & $g_3(\psi)$ & $t(\psi)$ & $\tau^2(\psi)$ \\
\hline
\hline
$\mathcal{V}^\text{Null}$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
\hline
$\mathcal{V}^{1|2|3}$ & $>0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
\hline
$\mathcal{V}^{1|23}$ & $>0$ & $>0$ & $=0$ & $>0$ & $>0$ & $>0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
$\mathcal{V}^{2|13}$ & $>0$ & $>0$ & $>0$ & $=0$ & $>0$ & $=0$ & $>0$ & $=0$ & $=0$ & $=0$ \\
$\mathcal{V}^{3|12}$ & $>0$ & $>0$ & $>0$ & $>0$ & $=0$ & $=0$ & $=0$ & $>0$ & $=0$ & $=0$ \\
\hline
$\mathcal{V}^\text{W}$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $=0$ \\
$\mathcal{V}^\text{GHZ}$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$
\end{tabular}
\caption{SLOCC classes of three-qubit state vectors
identified by the vanishing of the pure-state indicator functions
given in (\ref{eq:newPureLUinvs}).}
\label{tab:pureSLOCC3}
\end{table*}
We call the functions in (\ref{eq:newPureLUinvs}) \emph{pure state indicator functions} for the three-qubit case.
We will give the exact definition of indicator functions for the general case later (in Sec.~\ref{subsec:Gen:Indicators});
until then, we just use this name
for non-negative functions having the vanishing properties given in Table~\ref{tab:pureSLOCC3}.
Although this scheme constructed by ten quantities is even more redundant than the previous two,
it will turn out that these ten indicator functions (\ref{eq:newPureLUinvs})
will be necessary in the case of mixed states.
Moreover, investigating Table~\ref{tab:pureSLOCC3}, we can catch all the ideas leading to the general construction.
\section{Mixed three-qubit states}
\label{sec:Mixed}
Here we recall and extend the
PSS classification for three qubits.
The main concept here, first given in \cite{DurCiracTarrach3QBMixSep,DurCiracTarrachBMixSep},
then used and extended in \cite{SeevinckUffinkMixSep,Acinetal3QBMixClass},
is that we define a density matrix to be the element of a class
according to whether it can or cannot be mixed by the use of
pure states of some given kinds.
\subsection{Convex subsets}
\label{subsec:Mixed:Subsets}
Let us introduce some convenient notations.
The set of states $\mathcal{D}\equiv\mathcal{D}(\mathcal{H})\subset\Lin(\mathcal{H})$
is the convex body of positive semidefinite operators of unit trace acting on $\mathcal{H}$,
while the set of pure states $\mathcal{P}\subset\mathcal{D}$
is the set of extremal points of $\mathcal{D}$,
which are the projection operators of rank $1$.
(For the sake of simplicity, we have restricted ourselves to the operators of unit trace, that is, density matrices,
in spite of the fact that the construction could be extended for the whole cone of positive semidefinite operators.)
Disjoint subsets in $\mathcal{P}$
given by unit vectors of different SLOCC classes are
\begin{subequations}
\label{eq:Psets}
\begin{align}
\mathcal{P}^{1|2|3} &=\bigl\{\cket{\psi}\bra{\psi}\;\big\vert\; \cket{\psi}\in \mathcal{V}^{1|2|3},\; \norm{\psi}^2=1 \bigr\},\\
\mathcal{P}^{a|bc} &=\bigl\{\cket{\psi}\bra{\psi}\;\big\vert\; \cket{\psi}\in \mathcal{V}^{a|bc},\; \norm{\psi}^2=1 \bigr\},\\
\mathcal{P}^\text{W} &=\bigl\{\cket{\psi}\bra{\psi}\;\big\vert\; \cket{\psi}\in \mathcal{V}^\text{W},\; \norm{\psi}^2=1 \bigr\},\\
\mathcal{P}^\text{GHZ} &=\bigl\{\cket{\psi}\bra{\psi}\;\big\vert\; \cket{\psi}\in \mathcal{V}^\text{GHZ},\; \norm{\psi}^2=1 \bigr\},
\end{align}
which cover $\mathcal{P}$ entirely:
$\mathcal{P}=\mathcal{P}^{1|2|3}\cup\mathcal{P}^{1|23}\cup\mathcal{P}^{2|13}\cup\mathcal{P}^{3|12}
\cup\mathcal{P}^\text{W}\cup\mathcal{P}^\text{GHZ}$.
Besides these, if only partial separability properties are considered,
define
\begin{equation}
\mathcal{P}^{123} =\bigl\{\cket{\psi}\bra{\psi}\;\big\vert\; \cket{\psi}\in \mathcal{V}^{123},\; \norm{\psi}^2=1 \bigr\},
\end{equation}
\end{subequations}
so $\mathcal{P}^{123}=\mathcal{P}^\text{W}\cup\mathcal{P}^\text{GHZ}$.
Except $\mathcal{P}^{1|2|3}$, none of the above sets are closed.
The notion of $k$-separability and $\alpha_k$-separability \cite{SeevinckUffinkMixSep},
and the relevant classes of \cite{Acinetal3QBMixClass} for three-qubit systems
can be formulated as the convex hulls of some of the sets (\ref{eq:Psets}).
The \emph{$3$-separable states} ($\mathcal{D}^\text{$3$-sep}$),
or, equivalently $1|2|3$-separable states ($\mathcal{D}^{1|2|3}$)
can be mixed from the pure states of $\mathcal{P}^{1|2|3}$, i.e., they are fully separable.
The \emph{$a|bc$-separable states} ($\mathcal{D}^{a|bc}$)
can be written in the form $\sum_ip_i\varrho_{a,i}\otimes\varrho_{bc,i}$,
[$\varrho_{a,i}\in\mathcal{D}(\mathcal{H}^a)$,
$\varrho_{bc,i}\in\mathcal{D}(\mathcal{H}^b\otimes\mathcal{H}^c)$],
where we demand only the split between $a$ and $bc$,
but a split between $b$ and $c$ can also occur in the pure-state decompositions,
so they can be mixed from the pure states of $\mathcal{P}^{1|2|3}$ and $\mathcal{P}^{a|bc}$.
The \emph{$2$-separable states}, also called \emph{biseparable states} ($\mathcal{D}^\text{$2$-sep}$)
are of the form $\sum_ip_i\varrho_{a_i,i}\otimes\varrho_{b_ic_i,i}$,
so they can be mixed from the pure states of $\mathcal{P}^{1|2|3}$,
$\mathcal{P}^{1|23}$, $\mathcal{P}^{2|13}$, and $\mathcal{P}^{3|12}$.
These states are also of relevance because, although they are not separable under any $a|bc$ split,
there is no need of genuine three-qubit entangled pure state to mix them \cite{SeevinckUffinkMixSep}.
From the point of view of convex hulls of extremal points,
it can be seen better than originally in \cite{SeevinckUffinkMixSep} that we can define
three new partial separability sets ``between'' the $a|bc$-separable
and $2$-separable ones.
For example, the \emph{$2|13$-$3|12$-separable states} ($\mathcal{D}^{\twoprt{2|13}{3|12}}$)
are the states which
can be mixed from the pure states of $\mathcal{P}^{1|2|3}$, $\mathcal{P}^{2|13}$, and $\mathcal{P}^{3|12}$.
States of this kind are also of relevance,
since there is no need of $1|23$-separable pure states to mix them,
that is, entanglement between the $2$ and the $3$ subsystems.
Beyond these,
we use the set of \emph{W-states} \cite{Acinetal3QBMixClass} ($\mathcal{D}^\text{W}$)
which can be expressed as the mixture of the pure states $\mathcal{P}^{1|2|3}$,
$\mathcal{P}^{1|23}$, $\mathcal{P}^{2|13}$, $\mathcal{P}^{3|12}$,
and $\mathcal{P}^\text{W}$,
so there is no need for pure states of GHZ type to mix them,
and the set of \emph{GHZ states} ($\mathcal{D}^\text{GHZ}$)
or, equivalently, $1$-separable ($\mathcal{D}^\text{$1$-sep}$),
or $123$-separable states ($\mathcal{D}^{123}$),
which is equal to the full set of states ($\mathcal{D}$).
Summarizing, we have the following
\emph{PSS subsets} in $\mathcal{D}$
arising as convex hulls of pure states of given kinds:
\begin{subequations}
\label{eq:Dsets}
\begin{align}
\label{eq:Dsets:1|2|3}
\mathcal{D}^{1|2|3} &= \Conv\bigl(
\mathcal{P}^{1|2|3}\bigr)\equiv\mathcal{D}^\text{$3$-sep},\\
\mathcal{D}^{a|bc} &= \Conv\bigl(
\mathcal{P}^{1|2|3}\cup\mathcal{P}^{a|bc}\bigr),\\
\label{eq:Dsets:bcacab}
\mathcal{D}^\twoprt{b|ac}{c|ab} &= \Conv \bigl(
\mathcal{P}^{1|2|3}\cup\mathcal{P}^{b|ac}\cup\mathcal{P}^{c|ab}\bigr),\\
\label{eq:Dsets:2-sep}
\mathcal{D}^\text{$2$-sep} &= \Conv \bigl(
\mathcal{P}^{1|2|3}\cup\mathcal{P}^{1|23}\cup\mathcal{P}^{2|13}\cup\mathcal{P}^{3|12}\bigr),\\
\label{eq:Dsets:W}
\begin{split}
\mathcal{D}^\text{W} &= \Conv \bigl(
\mathcal{P}^{1|2|3}\cup\mathcal{P}^{1|23}\cup\mathcal{P}^{2|13}\cup\mathcal{P}^{3|12}\\
&\qquad\qquad\cup\mathcal{P}^\text{W}\bigr),
\end{split}\\
\label{eq:Dsets:123}
\begin{split}
\mathcal{D}^{123} &= \Conv \bigl(
\mathcal{P}^{1|2|3}\cup\mathcal{P}^{1|23}\cup\mathcal{P}^{2|13}\cup\mathcal{P}^{3|12}\\
&\qquad\qquad\cup\underbrace{\mathcal{P}^\text{W}\cup\mathcal{P}^\text{GHZ}}_{\mathcal{P}^{123}}\bigr)
\equiv\mathcal{D}^\text{$1$-sep}\equiv\mathcal{D}.
\end{split}
\end{align}
\end{subequations}
These sets are convex and they
contain each other in a hierarchic way,
which is illustrated in Fig.~\ref{fig:incl}.
\begin{figure}
\includegraphics{mix3qb1.eps}
\caption{Inclusion hierarchy of the PSS sets $\mathcal{D}^{\dots}$ given in (\ref{eq:Dsets}).}
\label{fig:incl}
\end{figure}
From an abstract point of view,
we form the convex hulls of \emph{closed} sets \cite{Acinetal3QBMixClass},
and the convex hulls of \emph{all the possible closed sets}
arising from the unions of the $\mathcal{P}^{\dots}$ sets (\ref{eq:Psets}) of extremal points
are listed in (\ref{eq:Dsets}) above.
We mean the PSS classification
involving the PSS subsets (\ref{eq:Dsets:1|2|3})--(\ref{eq:Dsets:123})
[and the PS classification
involving the PS subsets (\ref{eq:Dsets:1|2|3})--(\ref{eq:Dsets:2-sep}) and (\ref{eq:Dsets:123})]
to be \emph{complete} in \emph{this} sense.
As special, noncomplete cases,
we get back the classification involving only the sets
$\mathcal{D}^\text{$k$-sep}$ and $\mathcal{D}^{\alpha_k}$
(for any $k$-partite split $\alpha_k$)
obtained by Seevinck and Uffink \cite{SeevinckUffinkMixSep},
the classification involving only the sets $\mathcal{D}^{\alpha_k}$
obtained by D\"ur and Cirac \cite{DurCiracTarrach3QBMixSep,DurCiracTarrachBMixSep}
and also the classification involving only the sets $\mathcal{D}^\text{$k$-sep}$ and $\mathcal{D}^\text{W}$,
obtained by Ac\'in, Bru\ss{}, Lewenstein and Sanpera \cite{Acinetal3QBMixClass}.
\subsection{Classes}
\label{subsec:Mixed:Classes}
Now, we determine the \emph{PSS classes} of three-qubit mixed states.
The abstract definition of these classes \cite{SeevinckUffinkMixSep}
is that they are the possible nontrivial intersections of the $\mathcal{D}^{\dots}$ convex subsets listed in (\ref{eq:Dsets}).
Since we want to deal also with the sets $\mathcal{D}^{\twoprt{b|ac}{c|ab}}$,
we cannot draw an expressive ``onionlike'' figure as was done in \cite{SeevinckUffinkMixSep}
for the sets $\mathcal{D}^{1|2|3}$, $\mathcal{D}^{a|bc}$, and $\mathcal{D}^\text{$2$-sep}$.
We have to proceed in a formal manner.
If we have the sets $A_1,A_2,\dots,A_n$,
all of their possible intersections can be constructed
by intersecting, for each $i$, either the set $A_i$ or its complement $\cmpl{A}_i$.
We have $10$ PSS subsets $\mathcal{D}^{\dots}$,
so we can formally write $2^{10}=1024$ possible intersections in this way.
If $B\subseteq A$, then $B\cap \cmpl{A}=\emptyset$,
so some intersections will be automatically empty
(\emph{``empty by construction''})
and,
using the inclusion hierarchy of PSS subsets in Fig.~\ref{fig:incl},
we write only the intersections which are ``not empty by construction.''
The number of these will turn out to be only $21$.
(Again, if $B\subseteq A$, then $B\cap A=B$ and $\cmpl{B}\cap \cmpl{A}=\cmpl{A}$,
so we can write these $21$ classes as intersection sequences
much shorter than $10$ terms.)
Since the appearance of the $\mathcal{D}^{\twoprt{b|ac}{c|ab}}$-type sets in the intersections
makes the meaning of the classes a little bit involved,
we write out the list of the PSS classes with detailed explanations.
First, the class
\begin{subequations}
\label{eq:Classes}
\begin{equation}
\mathcal{C}^3=\mathcal{D}^{1|2|3}
\end{equation}
is the set of fully separable states.
Then come the $18$ classes of $2$-separable entangled states,
that is, the subsets in $\mathcal{D}^\text{$2$-sep}\setminus\mathcal{D}^{1|2|3}$.
The first one of them is
\begin{equation}
\begin{split}
\mathcal{C}^{2.8}&=\cmpl{\mathcal{D}^{1|2|3}}\cap\mathcal{D}^{1|23}\cap\mathcal{D}^{2|13}\cap\mathcal{D}^{3|12}\\
&=\bigl(\mathcal{D}^{1|23}\cap\mathcal{D}^{2|13}\cap\mathcal{D}^{3|12}\bigr)\setminus\mathcal{D}^{1|2|3},
\end{split}
\end{equation}
which is the set of states
which can be written as $1|23$-separable states
(i.e., convex combinations of $\mathcal{P}^{1|2|3}$ and $\mathcal{P}^{1|23}$ pure states;
the formation is not unique)
and can also be written as $2|13$-separable states
and can also be written as $3|12$-separable states
but cannot be written as $1|2|3$-separable states.
The existence of such states was counterintuitive
because, for pure states, if a tripartite pure state is separable under any $a|bc$ bipartition, then it is fully separable.
For mixed states, however, explicit examples can be constructed \cite{BennettetalUPB,Acinetal3QBMixClass},
which can be written in the form $\sum_ip_i\varrho_{a,i}\otimes\varrho_{bc,i}$ for any $a|bc$ bipartition,
but cannot be written in the form $\sum_ip_i\varrho_{1,i}\otimes\varrho_{2,i}\otimes\varrho_{3,i}$.
Alternatively, we can say that
states of this class cannot be mixed without the use of bipartite entanglement,
but they can be mixed by the use of bipartite entanglement
inside only one bipartite subsystem; it does not matter which one.
(This is class $2.8$ in \cite{SeevinckUffinkMixSep}.)
The next three classes are
\begin{equation}
\begin{split}
\mathcal{C}^{2.7.a}&=\cmpl{\mathcal{D}^{a|bc}}\cap\mathcal{D}^{b|ac}\cap\mathcal{D}^{c|ab}\\
&=\bigl(\mathcal{D}^{b|ac}\cap\mathcal{D}^{c|ab}\bigr)\setminus\mathcal{D}^{a|bc}.
\end{split}
\end{equation}
For example, $\mathcal{C}^{2.7.1}$
is the set of states which can be written as $2|13$-separable states
and can also be written as $3|12$-separable states
but cannot be written as $1|23$-separable states.
Alternatively, we can say that
states of this class cannot be mixed by the use of bipartite entanglement only inside the $23$ subsystem,
but they can be mixed by the use of bipartite entanglement
inside either the $12$ or the $13$ subsystems;
both of them are equally suitable.
(These three classes are classes $2.7$, $2.6$, and $2.5$ in \cite{SeevinckUffinkMixSep}.)
The next three classes are
\begin{equation}
\begin{split}
\mathcal{C}^{2.6.a}&=\mathcal{D}^{a|bc}\cap\cmpl{\mathcal{D}^{b|ac}}\cap\cmpl{\mathcal{D}^{c|ab}}\cap\mathcal{D}^{\twoprt{b|ac}{c|ab}}\\
&= \mathcal{D}^{a|bc}\cap\bigl[\mathcal{D}^{\twoprt{b|ac}{c|ab}}\setminus\bigl(\mathcal{D}^{b|ac}\cup\mathcal{D}^{c|ab} \bigr) \bigr].
\end{split}
\end{equation}
For example, $\mathcal{C}^{2.6.1}$
is the set of states which can be written as $1|23$-separable states
and can also be written as states of a new kind:
where the state can be written as
$2|13$-$3|12$-separable states which are neither $2|13$-separable nor $3|12$-separable.
And this is the novelty here.
Alternatively, we can say that
to mix a state of this class
we need bipartite entanglement
either inside the $23$ subsystem,
or inside both of the $12$ and the $13$ subsystems.
(The latter seems like a roundabout connecting the $2$ and $3$ subsystems through the $1$ subsystem.)
The next three classes are
\begin{equation}
\begin{split}
\mathcal{C}^{2.5.a}&=\mathcal{D}^{a|bc}\cap\cmpl{\mathcal{D}^{b|ac}}\cap\cmpl{\mathcal{D}^{c|ab}}\cap\cmpl{\mathcal{D}^{\twoprt{b|ac}{c|ab}}}\\
&\equiv\mathcal{D}^{a|bc}\cap\cmpl{\mathcal{D}^{\twoprt{b|ac}{c|ab}}}
=\mathcal{D}^{a|bc}\setminus\mathcal{D}^{\twoprt{b|ac}{c|ab}}.
\end{split}
\end{equation}
For example, $\mathcal{C}^{2.5.1}$
is the set of states which can be written as $1|23$-separable states
but cannot be written as $2|13$-$3|12$-separable states.
Alternatively, we can say that
states of this class
cannot be mixed by the use of bipartite entanglement only inside both of the $12$ and $13$ subsystems, contrary to $\mathcal{C}^{2.6.1}$.
(The roundabout does not exist here.)
(The unions $\mathcal{C}^{2.6.a}\cup\mathcal{C}^{2.5.a}=\mathcal{D}^{a|bc}\cap\cmpl{\mathcal{D}^{b|ac}}\cap\cmpl{\mathcal{D}^{c|ab}}$
are classes $2.4$, $2.3$, and $2.2$ in \cite{SeevinckUffinkMixSep}.)
The next class is
\begin{equation}
\begin{split}
\mathcal{C}^{2.4}=&\cmpl{\mathcal{D}^{1|23}}\cap\cmpl{\mathcal{D}^{2|13}}\cap\cmpl{\mathcal{D}^{3|12}}\\
&\cap\mathcal{D}^{\twoprt{2|13}{3|12}}\cap\mathcal{D}^{\twoprt{1|23}{3|12}}\cap\mathcal{D}^{\twoprt{1|23}{2|13}}\\
=&\bigl(\mathcal{D}^{\twoprt{2|13}{3|12}}\cap\mathcal{D}^{\twoprt{1|23}{3|12}}\cap\mathcal{D}^{\twoprt{1|23}{2|13}}\bigr)\\
&\setminus\bigl(\mathcal{D}^{1|23}\cup\mathcal{D}^{2|13}\cup\mathcal{D}^{3|12}\bigr)\\
=&\bigl[\mathcal{D}^{\twoprt{2|13}{3|12}}\setminus\bigl(\mathcal{D}^{2|13}\cup\mathcal{D}^{3|12} \bigr) \bigr]\\
&\cap\bigl[\mathcal{D}^{\twoprt{1|23}{3|12}}\setminus\bigl(\mathcal{D}^{1|23}\cup\mathcal{D}^{3|12} \bigr) \bigr]\\
&\cap\bigl[\mathcal{D}^{\twoprt{1|23}{2|13}}\setminus\bigl(\mathcal{D}^{1|23}\cup\mathcal{D}^{2|13} \bigr) \bigr],
\end{split}
\end{equation}
which is the set of states
which can be mixed by the use of bipartite entanglement inside any two bipartite subsystems,
but cannot be mixed by the use of bipartite entanglement inside only one bipartite subsystem.
The next three classes are
\begin{equation}
\begin{split}
\mathcal{C}^{2.3.a}=&\cmpl{\mathcal{D}^{a|bc}}\cap\cmpl{\mathcal{D}^{\twoprt{b|ac}{c|ab}}}\cap
\mathcal{D}^{\twoprt{a|bc}{c|ab}}\cap\mathcal{D}^{\twoprt{a|bc}{b|ac}}\\
=&\Bigl[\bigl[\mathcal{D}^{\twoprt{a|bc}{c|ab}}\setminus\bigl(\mathcal{D}^{c|ab}\cup\mathcal{D}^{a|bc} \bigr) \bigr]\\
&\cap \bigl[\mathcal{D}^{\twoprt{a|bc}{b|ac}}\setminus\bigl(\mathcal{D}^{a|bc}\cup\mathcal{D}^{b|ac} \bigr) \bigr]\Bigr]
\setminus\mathcal{D}^{\twoprt{b|ac}{c|ab}}.
\end{split}
\end{equation}
For example, $\mathcal{C}^{2.3.1}$
is the set of states
which can be mixed by the use of bipartite entanglement inside the $23$ subsystem
together with bipartite entanglement inside either the $12$ or the $13$ subsystems,
but cannot be mixed by the use of bipartite entanglement inside the $12$ \emph{and} the $13$ subsystems only.
(Note that mixing by the use of only one kind of bipartite entanglement has already been excluded.)
The next three classes are
\begin{equation}
\begin{split}
\mathcal{C}^{2.2.a}&=\mathcal{D}^{\twoprt{b|ac}{c|ab}}\cap\cmpl{\mathcal{D}^{\twoprt{a|bc}{c|ab}}}\cap\cmpl{\mathcal{D}^{\twoprt{a|bc}{b|ac}}}\\
&=\mathcal{D}^{\twoprt{b|ac}{c|ab}}\setminus\bigl(\mathcal{D}^{\twoprt{a|bc}{c|ab}}\cup\mathcal{D}^{\twoprt{a|bc}{b|ac}}\bigr).
\end{split}
\end{equation}
For example, $\mathcal{C}^{2.2.1}$
is the set of states
which can be mixed by the use of bipartite entanglement inside both the $12$ and the $13$ subsystems together,
but cannot be mixed by the use of bipartite entanglement inside the $23$ subsystem
together with bipartite entanglement inside only one of the $12$ or the $13$ subsystems.
The next class is
\begin{equation}
\begin{split}
\mathcal{C}^{2.1}&=\cmpl{\mathcal{D}^{\twoprt{2|13}{3|12}}}\cap\cmpl{\mathcal{D}^{\twoprt{1|23}{3|12}}}\cap\cmpl{\mathcal{D}^{\twoprt{1|23}{2|13}}}\cap\mathcal{D}^\text{$2$-sep}\\
&=\mathcal{D}^\text{$2$-sep}\setminus\bigl(\mathcal{D}^{\twoprt{2|13}{3|12}}\cup\mathcal{D}^{\twoprt{1|23}{3|12}}\cup \mathcal{D}^{\twoprt{1|23}{2|13}}\bigr),
\end{split}
\end{equation}
which is the set of states
which can be mixed by the use of bipartite entanglement inside all the three bipartite subsystems,
but cannot be mixed by the use of bipartite entanglement inside only two (or one) bipartite subsystems.
(The union
$\mathcal{C}^{2.4}
\cup\mathcal{C}^{2.3.1}\cup\mathcal{C}^{2.3.2}\cup\mathcal{C}^{2.3.3}
\cup\mathcal{C}^{2.2.1}\cup\mathcal{C}^{2.2.2}\cup\mathcal{C}^{2.2.3}
\cup\mathcal{C}^{2.1}= \mathcal{D}^\text{$2$-sep}\setminus\bigl(\mathcal{D}^{1|23}\cup\mathcal{D}^{2|13}\cup\mathcal{D}^{3|12}\bigr)$
is class $2.1$ in \cite{SeevinckUffinkMixSep}.)
Then come the $2$ classes of states containing genuine tripartite entanglement \cite{Acinetal3QBMixClass},
that is, the subsets in $\mathcal{D}\setminus\mathcal{D}^\text{$2$-sep}$.
The class
\begin{equation}
\mathcal{C}^\text{W}=\cmpl{\mathcal{D}^\text{$2$-sep}}\cap\mathcal{D}^\text{W}
=\mathcal{D}^\text{W}\setminus\mathcal{D}^\text{$2$-sep}
\end{equation}
is the set of states which cannot be mixed
without the use of some tripartite entangled pure states,
but there is no need for GHZ type entanglement \cite{Acinetal3QBMixClass}.
The class
\begin{equation}
\mathcal{C}^\text{GHZ}=\cmpl{\mathcal{D}^\text{W}}\cap\mathcal{D}^{123}
=\mathcal{D}^{123}\setminus\mathcal{D}^\text{W}
\end{equation}
is the set of states which cannot be mixed
without the use of GHZ type entanglement.
All the above classes are PSS classes.
For the PS classification define the class of states containing genuine tripartite entanglement
instead of $\mathcal{C}^\text{W}$ and $\mathcal{C}^\text{GHZ}$:
\begin{equation}
\mathcal{C}^1=\mathcal{C}^\text{W}\cup\mathcal{C}^\text{GHZ}=\mathcal{D}^{123}\setminus\mathcal{D}^\text{$2$-sep}.
\end{equation}
\end{subequations}
Except $\mathcal{C}^3$,
the $\mathcal{C}^\text{\dots}$ PS(S) classes above are neither convex nor closed,
but, by construction, they cover $\mathcal{D}$ entirely.
Unfortunately, we cannot draw an onionlike figure
illustrating these classes, like the one in \cite{SeevinckUffinkMixSep}
(maybe it could be drawn in three dimensions);
we only summarize these $1+18+1+1$ classes in Table~\ref{tab:mixClasses}.
\begin{table*}
\begin{tabular}{cc||c|ccc|ccc|c|cc||ccc}
PSS Class &
PS Class &
$\mathcal{D}^{1|2|3}$ &
$\mathcal{D}^{a|bc}$ &
$\mathcal{D}^{b|ac}$ &
$\mathcal{D}^{c|ab}$ &
$\mathcal{D}^{\twoprt{b|ac}{c|ab}}$ &
$\mathcal{D}^{\twoprt{a|bc}{c|ab}}$ &
$\mathcal{D}^{\twoprt{a|bc}{b|ac}}$ &
$\mathcal{D}^\text{$2$-sep}$ &
$\mathcal{D}^\text{W}$ &
$\mathcal{D}^{123}$ &
in \cite{SeevinckUffinkMixSep} &
in \cite{DurCiracTarrachBMixSep} &
in \cite{Acinetal3QBMixClass} \\
\hline
\hline
$\mathcal{C}^3$ & $\mathcal{C}^3$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & 3 & 5 & S \\
\hline
$\mathcal{C}^{2.8}$ & $\mathcal{C}^{2.8}$ & $\not\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & 2.8 & 4 & B \\
$\mathcal{C}^{2.7.a}$ & $\mathcal{C}^{2.7.a}$ & $\not\subset$ & $\not\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & 2.7,6,5 & 3.3,2,1 & B \\
$\mathcal{C}^{2.6.a}$ & $\mathcal{C}^{2.6.a}$ & $\not\subset$ & $\subset$ & $\not\subset$ & $\not\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & 2.4,3,2 & 2.3,2,1 & B \\
$\mathcal{C}^{2.5.a}$ & $\mathcal{C}^{2.5.a}$ & $\not\subset$ & $\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & 2.4,3,2 & 2.3,2,1 & B \\
$\mathcal{C}^{2.4}$ & $\mathcal{C}^{2.4}$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & 2.1 & 1 & B \\
$\mathcal{C}^{2.3.a}$ & $\mathcal{C}^{2.3.a}$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & $\subset$ & 2.1 & 1 & B \\
$\mathcal{C}^{2.2.a}$ & $\mathcal{C}^{2.2.a}$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\subset$ & $\not\subset$ & $\not\subset$ & $\subset$ & $\subset$ & $\subset$ & 2.1 & 1 & B \\
$\mathcal{C}^{2.1}$ & $\mathcal{C}^{2.1}$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\subset$ & $\subset$ & $\subset$ & 2.1 & 1 & B \\
\hline
$\mathcal{C}^\text{W}$ & $\mathcal{C}^1$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\subset$ & $\subset$ & 1 & 1 & W \\
$\mathcal{C}^\text{GHZ}$ & $\mathcal{C}^1$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\not\subset$ & $\subset$ & 1 & 1 & GHZ
\end{tabular}
\caption{PSS classes of mixed three-qubit states
and PS classes of mixed tripartite states.
Additionally, we show the
classifications obtained by
Seevinck and Uffink \cite{SeevinckUffinkMixSep},
D\"ur, Cirac and Tarrach \cite{DurCiracTarrachBMixSep},
and Ac\'in, Bru\ss{}, Lewenstein and Sanpera \cite{Acinetal3QBMixClass}.}
\label{tab:mixClasses}
\end{table*}
The non-emptiness of the PS(S) classes above
is not obvious,
since it depends on the arrangement of different kinds of extremal points.
(We know only that they are not empty \emph{by construction}.)
This issue has not been handled yet,
but experience in the geometry of mixed states \cite{BengtssonZyczkowski}
suggest that
the arrangement of different kinds of extremal points
leading to some empty classes
would be very implausible.
\subsection{Convex roof quantities}
\label{subsec:Mixed:CRoof}
As a next step, we obtain indicator functions on mixed states
from the pure-state indicator functions (\ref{eq:newPureLUinvs})
by convex roof construction \cite{BennettetalMixedStates,UhlmannFidelityConcurrence,UhlmannConvRoofs}.
In general, let
\begin{equation*}
f:\mathcal{P}\longrightarrow \field{R}
\end{equation*}
be a continuous function.
Then its convex roof extension is defined as
\begin{equation}
\label{eq:cnvroofext}
\begin{split}
\cnvroof{f}&:\mathcal{D}\longrightarrow \field{R},\\
\cnvroof{f}&(\varrho)=\min \sum_i p_i f(\psi_i),
\end{split}
\end{equation}
where the minimization
takes place on all pure-state decompositions of $\varrho$:
$0\leq p_i$, $\sum_i p_i=1$, $\sum_i p_i \cket{\psi_i}\bra{\psi_i}=\varrho$.
The existence of the minimum is crucial for our construction.
It follows from the Schr\"odinger mixture theorem \cite{SchrodingerMixtureThm},
also known as the Gisin-Hughston-Jozsa-Wootters lemma \cite{GisinMixtureThm,HughstonJozsaWoottersMixtureThm}, that
the decompositions for $m$ pure states
are labeled by the elements of the \emph{compact} complex manifold,
called Stiefel manifold, $\mathrm{St}_d(\field{C}^m)= \LieGrp{U}(m)/\LieGrp{U}(m-d)$,
where $d=\dim\mathcal{H}$ \cite{BengtssonZyczkowski}.
The Carath\'eodory theorem ensures that we need only \emph{finite} $m$,
or to be more precise $m\leq d^2$, shown by Uhlmann \cite{UhlmannOptimalDecomp}.
These observations guarantee the existence of the minimum in (\ref{eq:cnvroofext}).
Now, it is easy to prove the following
necessary and sufficient conditions for the PSS subsets (\ref{eq:Dsets})
given by the convex roof extension of the indicator functions (\ref{eq:newPureLUinvs}):
\begin{subequations}
\label{eq:vanishing}
\begin{align}
\label{eq:vanishing:y}
\varrho&\in\mathcal{D}^{1|2|3}&
\quad&\Longleftrightarrow&\quad \cnvroof{y}(\varrho)&=0,\\
\label{eq:vanishing:sa}
\varrho&\in\mathcal{D}^{a|bc}&
\quad&\Longleftrightarrow&\quad \cnvroof{s}_a(\varrho)&=0,\\
\label{eq:vanishing:ga}
\varrho&\in\mathcal{D}^{\twoprt{b|ac}{c|ab}}&
\quad&\Longleftrightarrow&\quad \cnvroof{g}_a(\varrho)&=0,\\
\label{eq:vanishing:t}
\varrho&\in\mathcal{D}^\text{$2$-sep}&
\quad&\Longleftrightarrow&\quad \cnvroof{t}(\varrho)&=0,\\
\varrho&\in\mathcal{D}^\text{W}&
\quad&\Longleftrightarrow&\quad \cnvroof{{\tau^2}}(\varrho)&=0.
\end{align}
\end{subequations}
To see the \textit{$\Rightarrow$ implications,}
observe that all the $\mathcal{D}^\text{\dots}$ PSS subsets are the convex hulls of
such pure states [see in (\ref{eq:Dsets})] for which the given functions
vanish [see in Table \ref{tab:pureSLOCC3}].
Since these functions can take only non-negative values,
the minimum in the convex roof extension is zero.
To see the \textit{$\Leftarrow$ implications,}
note that
if the convex roof extension of a non-negative function vanishes
then there exists a decomposition for pure states for which the function vanishes.
Again, the vanishing of a given function
singles out the pure states [see in Table \ref{tab:pureSLOCC3}]
from which the states of the given $\mathcal{D}^\text{\dots}$ PSS subset can be mixed [see in (\ref{eq:Dsets})].
The necessary and sufficient conditions for the PSS subsets (\ref{eq:vanishing})
yield necessary and sufficient conditions for the PSS classes,
and we can fill out Table \ref{tab:mixIdent}
for the identification of the PSS classes of Table \ref{tab:mixClasses}, given for mixed states,
similar to Table \ref{tab:pureSLOCC3}, given for pure states.
\begin{table*}
\begin{tabular}{c||c|ccc|ccc|c|c}
Class
& $\cnvroof{y}(\varrho)$ &
$\cnvroof{s}_a(\varrho)$ & $\cnvroof{s}_b(\varrho)$ & $\cnvroof{s}_c(\varrho)$ &
$\cnvroof{g}_a(\varrho)$ & $\cnvroof{g}_b(\varrho)$ & $\cnvroof{g}_c(\varrho)$ &
$\cnvroof{t}(\varrho)$ & $\cnvroof{{\tau^2}}(\varrho)$ \\
\hline
\hline
$\mathcal{C}^3$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
\hline
$\mathcal{C}^{2.8}$ & $>0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
$\mathcal{C}^{2.7.a}$ & $>0$ & $>0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
$\mathcal{C}^{2.6.a}$ & $>0$ & $=0$ & $>0$ & $>0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
$\mathcal{C}^{2.5.a}$ & $>0$ & $=0$ & $>0$ & $>0$ & $>0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
$\mathcal{C}^{2.4}$ & $>0$ & $>0$ & $>0$ & $>0$ & $=0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
$\mathcal{C}^{2.3.a}$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $=0$ & $=0$ & $=0$ & $=0$ \\
$\mathcal{C}^{2.2.a}$ & $>0$ & $>0$ & $>0$ & $>0$ & $=0$ & $>0$ & $>0$ & $=0$ & $=0$ \\
$\mathcal{C}^{2.1}$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $=0$ & $=0$ \\
\hline
$\mathcal{C}^\text{W}$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $=0$ \\
$\mathcal{C}^\text{GHZ}$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$ & $>0$
\end{tabular}
\caption{PSS classes of mixed three-qubit states given in Table \ref{tab:mixClasses}
identified by the vanishing of the mixed indicator functions
(convex roof extension of the indicator functions (\ref{eq:newPureLUinvs})).}
\label{tab:mixIdent}
\end{table*}
Because of their vanishing properties, we call the convex roof extension of pure indicator functions
\emph{mixed indicator functions}.
Note that the convex roof extension is a nonlinear operation:
$\cnvroof{(f_1+f_2)}\neq \cnvroof{f_1}+\cnvroof{f_2}$.
However, an inequality holds,
for example, $s_a=g_b+g_c$ and
$\cnvroof{s}_a=\cnvroof{(g_b+g_c)}\geq\cnvroof{g}_b+\cnvroof{g}_c$,
so $\cnvroof{s}_a$ can be nonzero even if both $\cnvroof{g}_b$ and $\cnvroof{g}_c$ are zero.
This is why we could identify $21$ classes of mixed states
by the use of the convex roof extension of functions
which identify only $6$ classes of state vectors.
On the other hand, if a classification does not involve all the PS(S) subsets,
then, through (\ref{eq:vanishing}),
we have to use only some of the indicator functions,
for example, $y$, $s_a$ and $t$ for the classification obtained by Seevinck and Uffink \cite{SeevinckUffinkMixSep},
$y$ and $s_a$ for the classification obtained by D\"ur, Cirac and Tarrach \cite{DurCiracTarrachBMixSep},
and $y$, $t$ and $\tau^2$ for the classification obtained by Ac\'in, Bru\ss{}, Lewenstein and Sanpera \cite{Acinetal3QBMixClass}.
\section{Examples}
\label{sec:Xmpl}
At this point, the most important question is whether all of the PSS classes in (\ref{eq:Classes}) are nonempty.
Of course, this can be checked by the use of (\ref{eq:vanishing}),
but calculating convex roof extensions symbolically is a hard problem.
Here we give considerations apart from convex roofs.
The classes given by Seevinck and Uffink in \cite{SeevinckUffinkMixSep} are nonempty;
these are $\mathcal{C}^{3}$,
$\mathcal{C}^{2.8}$,
$\mathcal{C}^{2.7.a}$,
the unions $\mathcal{C}^{2.6.a}\cup\mathcal{C}^{2.5.a}$,
the union $\mathcal{C}^{2.4}
\cup\mathcal{C}^{2.3.1}\cup\mathcal{C}^{2.3.2}\cup\mathcal{C}^{2.3.3}
\cup\mathcal{C}^{2.2.1}\cup\mathcal{C}^{2.2.2}\cup\mathcal{C}^{2.2.3}
\cup\mathcal{C}^{2.1}$,
and $\mathcal{C}^\text{W}\cup\mathcal{C}^\text{GHZ}$.
Both of the classes $\mathcal{C}^\text{W}$ and $\mathcal{C}^\text{GHZ}$ are nonempty \cite{Acinetal3QBMixClass}.
On the other hand, the pure sets (\ref{eq:Psets}) are contained in the following classes:
$\mathcal{P}^{1|2|3} \subset\mathcal{C}^3$,
$\mathcal{P}^{a|bc} \subset\mathcal{C}^{2.5.a}$,
$\mathcal{P}^\text{W} \subset\mathcal{C}^\text{W}$,
$\mathcal{P}^\text{GHZ}\subset\mathcal{C}^\text{GHZ}$,
so we have additionally that $\mathcal{C}^{2.5.a}$ is nonempty.
In the next paragraphs,
we construct states contained in classes $\mathcal{C}^{2.2.a}$ and $\mathcal{C}^{2.1}$.
This justifies the use of $b|ac$-$c|ab$-separable sets in the classification
(since we can distinguish between $\mathcal{C}^{2.2.a}$ and $\mathcal{C}^{2.1}$ by the use of these),
although the nonemptiness of
$\mathcal{C}^{2.6.a}$,
$\mathcal{C}^{2.4}$, and
$\mathcal{C}^{2.3.a}$
has not been shown yet.
From the point of view of ``mixtures of extremal points,''
it is easy to check that
the bipartite subsystems are separable for states in some PS subsets as follows:
\begin{align*}
\varrho&\in\mathcal{D}^{1|2|3}
&\Longrightarrow&
&\text{$\varrho_{23}$ sep. and $\varrho_{13}$ sep. and $\varrho_{12}$ sep.}\\
\varrho&\in\mathcal{D}^{a|bc}
&\Longrightarrow&
&\text{\phantom{$\varrho_{23}$ sep. and } $\varrho_{ac}$ sep. and $\varrho_{ab}$ sep.}\\
\varrho&\in\mathcal{D}^{\twoprt{b|ac}{c|ab}}
&\Longrightarrow&
&\text{$\varrho_{bc}$ sep. \phantom{and $\varrho_{13}$ sep. and $\varrho_{12}$ sep.}}
\end{align*}
Unfortunately, the reverse implications are not true.
For example, for the standard GHZ state (\ref{eq:GHZ}),
all bipartite subsystems are separable,
although $\cket{\text{GHZ}}\bra{\text{GHZ}}\notin\mathcal{D}^{1|2|3}$.
However, the negation of the implications above will turn out to be useful:
\begin{align*}
\varrho&\notin\mathcal{D}^{1|2|3}
&\Longleftarrow&
&\text{$\varrho_{23}$ ent. or $\varrho_{13}$ ent. or $\varrho_{12}$ ent.}\\
\varrho&\notin\mathcal{D}^{a|bc}
&\Longleftarrow&
&\text{\phantom{$\varrho_{23}$ ent. or } $\varrho_{ac}$ ent. or $\varrho_{ab}$ ent.}\\
\varrho&\notin\mathcal{D}^{\twoprt{b|ac}{c|ab}}
&\Longleftarrow&
&\text{$\varrho_{bc}$ ent. \phantom{or $\varrho_{13}$ ent. or $\varrho_{12}$ ent.}}
\end{align*}
The entanglement of two-qubit subsystems can be easily checked,
for example, by the Peres-Horodecki criterion \cite{PeresCrit,HorodeckiPosMapWitness}:
\begin{equation}
\label{eq:PPT}
\text{$\omega$ separable}\quad\Longleftrightarrow\quad
\ptransp{\omega}{1}\geq 0.
\end{equation}
Here $\omega\in\mathcal{D}(\mathcal{H}^b\otimes\mathcal{H}^c)$,
and $\ptransp{\;}{1}$ means transposition on the first subsystem;
although the transposition is basis-dependent, the positivity of $\ptransp{\omega}{1}$ is not.
The $\Leftarrow$ implication in (\ref{eq:PPT}) holds only for qubit-qubit or qubit-qutrit systems.
Now, take a $\varrho\in\mathcal{D}^{\twoprt{2|13}{3|12}}$.
Then $\varrho_{23}$ is always separable,
but if both $\varrho_{12}$ and $\varrho_{13}$ are entangled, then
by the above observations we have
$\varrho\notin\mathcal{D}^{1|23}$,
$\varrho\notin\mathcal{D}^{2|13}$, $\varrho\notin\mathcal{D}^{3|12}$,
moreover,
$\varrho\notin\mathcal{D}^{\twoprt{1|23}{2|13}}$, and
$\varrho\notin\mathcal{D}^{\twoprt{1|23}{3|12}}$.
This singles out exactly one class from Table \ref{tab:mixClasses}, namely $\mathcal{C}^{2.2.1}$.
So if we can mix a state $\varrho$ from $\mathcal{P}^{1|2|3}$, $\mathcal{P}^{b|ac}$, and $\mathcal{P}^{c|ab}$,
whose $\varrho_{ab}$ and $\varrho_{ac}$ subsystems are entangled,
then $\varrho\in\mathcal{C}^{2.2.a}$.
For example, such a state is the uniform mixture of projectors to the
$\cket{0}_b\otimes\cket{\text{B}}_{ac}$ and
$\cket{0}_c\otimes\cket{\text{B}}_{ab}$
vectors:
\begin{equation*}
\frac12 \cket{0}\bra{0}_b\otimes\cket{\text{B}}\bra{\text{B}}_{ac}+
\frac12 \cket{0}\bra{0}_c\otimes\cket{\text{B}}\bra{\text{B}}_{ab}
\in\mathcal{C}^{2.2.a},
\end{equation*}
where $\cket{\text{B}}$ is the usual Bell state (\ref{eq:B}).
Now, take a $\varrho\in\mathcal{D}^\text{$2$-sep}$.
Then if the states of all the two-qubit subsystems are entangled,
by the above observations we have
$\varrho\notin\mathcal{D}^{1|23}$,
$\varrho\notin\mathcal{D}^{2|13}$, $\varrho\notin\mathcal{D}^{3|12}$,
moreover,
$\varrho\notin\mathcal{D}^{\twoprt{2|13}{3|12}}$,
$\varrho\notin\mathcal{D}^{\twoprt{1|23}{3|12}}$, and
$\varrho\notin\mathcal{D}^{\twoprt{1|23}{2|13}}$.
This singles out exactly one class from Table \ref{tab:mixClasses}, namely $\mathcal{C}^{2.1}$.
So if we can mix a state $\varrho$ from $\mathcal{P}^{1|2|3}$, $\mathcal{P}^{1|23}$, $\mathcal{P}^{2|13}$, and $\mathcal{P}^{3|12}$,
all of whose two-qubit subsystems are entangled,
then $\varrho\in\mathcal{C}^{2.1}$.
For example, such a state is the mixture of projectors to the
previous two vectors together with
$\cket{1}_a\otimes\cket{\text{B}}_{bc}$:
\begin{equation*}
\begin{split}
\frac14 \cket{0}\bra{0}_b\otimes\cket{\text{B}}\bra{\text{B}}_{ac}+
\frac14 \cket{0}\bra{0}_c\otimes\cket{\text{B}}\bra{\text{B}}_{ab}\\
+\frac12 \cket{1}\bra{1}_a\otimes\cket{\text{B}}\bra{\text{B}}_{bc}
\in\mathcal{C}^{2.1}.
\end{split}
\end{equation*}
\section{Generalizations I. -- Three subsystems}
\label{sec:GenThreePart}
The considerations written out in detail in Secs.~\ref{sec:Pure} and \ref{sec:Mixed}
contain the main ideas which will be generalized in this and in the next sections.
In this section we break up with qubits, and consider tripartite systems
composed from \emph{subsystems of arbitrary dimensions.}
Obviously, this has no influence on the PS sets and PS classes,
given in Secs.~\ref{subsec:Mixed:Subsets} and \ref{subsec:Mixed:Classes},
the only question is about the construction of mixed-state indicator functions of Sec.~\ref{subsec:Mixed:CRoof}.
The generalization to \emph{arbitrary number of subsystems} is left to the next section.
\subsection{Pure state indicator functions
for tripartite systems from the FTS approach}
\label{subsec:GenThreePart:FTS}
To get the necessary and sufficient conditions for the PS classes in the tripartite case,
we need the generalizations of the pure-state indicator functions in (\ref{eq:newPureLUinvs:y})--(\ref{eq:newPureLUinvs:t}).
Apart from continuity,
the main---and only---requirement for these is
to satisfy the vanishing requirements for pure states given in Table \ref{tab:pureSLOCC3}
(apart from the column $\tau^2$,
and for row $\mathcal{V}^{123}$ instead of rows $\mathcal{V}^\text{W}$ and $\mathcal{V}^\text{GHZ}$).
Then
their convex roof extensions satisfy the vanishing requirements for mixed states given in Table \ref{tab:mixIdent}
(apart from the column $\cnvroof{{\tau^2}}$,
and for row $\mathcal{C}^{1}$ instead of rows $\mathcal{C}^\text{W}$ and $\mathcal{C}^\text{GHZ}$),
since in (\ref{eq:vanishing:y})--(\ref{eq:vanishing:t})
we have used only the vanishing-requirements for pure states.
The pure-state indicator functions of (\ref{eq:newPureLUinvs}) have been obtained in the FTS approach,
which works only for the qubit case.
However, some parts of the definitions can be generalized.
To do this, our basic quantities will be the local entropies
$s_a(\psi)=S_q(\pi_a)$ instead of the functions $g_a(\psi)$ given in (\ref{eq:newPureLUinvs:ga}),
since the former ones are defined for all dimensions.
[Here we use the notation for the projector $\pi=\cket{\psi}\bra{\psi}$,
and $\pi_a=\tr_{bc}(\pi)$.]
The most basic quantum entropy is the \emph{von Neumann entropy},
\begin{equation}
\label{eq:Neumann}
S(\varrho)=-\tr\bigl[\varrho\ln(\varrho)\bigr],
\end{equation}
having the strongest properties among all entropies.
The \emph{Tsallis entropy,} sometimes called $q$-entropy, in the quantum case is defined as
\begin{subequations}
\begin{equation}
\label{eq:Tsallis}
S_q(\varrho)=\frac{1}{1-q}\bigl[\tr(\varrho^q)-1\bigr],\qquad q>0,
\end{equation}
which is a nonadditive generalization of the von Neumann entropy:
$\lim_{q\to1}S_q(\varrho)=S(\varrho)$.
Again, as in (\ref{eq:pureLUinvs:sa}),
we can use the concurrence-squared, which is the normalized Tsallis entropy of parameter $2$:
\begin{equation}
\label{eq:conc2}
C^2(\varrho)=\frac{d}{d-1}S_2(\varrho)=\frac{d}{d-1}\bigl[1-\tr(\varrho^2)\bigr],
\end{equation}
\end{subequations}
if we prefer to deal with \emph{polynomials} in the $\psi^{ijk}$ and $\cc{(\psi^{ijk})}$ coefficients.
This is the nontrivial polynomial of the lowest degree which is also an entropy, that is, Schur-concave,
so tells us something about mixedness.
[In (\ref{eq:conc2}), $d$ is the dimension of the Hilbert space on which $\varrho$ acts,
so the prefactor $\frac{d}{d-1}$ ensures that $0\leq C^2(\varrho)\leq1$.]
Obviously, for all Tsallis entropies of the subsystems,
$s_a(\psi)=S_q(\pi_a)$ fulfils the corresponding column of Table \ref{tab:pureSLOCC3},
since it vanishes if and only if the subsystem is pure,
which means the separability of that subsystem from the rest of the system
if the whole system is in pure state.
From (\ref{eq:newPureLUinvs:sa}) and (\ref{eq:newPureLUinvs:ga}),
it turns out that $y$, given in (\ref{eq:newPureLUinvs:y}),
is just the average of the local entropies $y=\frac13(s_1+s_2+s_3)$,
vanishing if and only if no entanglement is present.
This works well not only for qubits, so we can keep this definition of $y$.
The functions $g_a$ in (\ref{eq:newPureLUinvs:ga}) can also be expressed by the local entropies (\ref{eq:newPureLUinvs:sa})
for qubits as $g_a=\frac12(s_b+s_c-s_a)$.
Can this definition be kept for subsystems of arbitrary dimensions?
For $\mathcal{V}^{1|2|3}$, obviously $g_a=0$.
For $\mathcal{V}^{a|bc}$, the subsystem $a$ can be separated from the others
so the subsystems $a$ and $bc$ are in pure states, $s_a=0$ and $s_b=s_c\neq0$,
from which $g_a\neq0$ and $g_b=g_c=0$.
So the first five rows of the $g_a$ columns of Table \ref{tab:pureSLOCC3} are fulfilled.
For the last row, we need that $g_a>0$
when genuine tripartite entanglement is present.
This is the problematic point.
This question can be traced back to the subadditivity of the Tsallis entropies.
Raggio's conjecture \cite{RaggioTsallis}
about that is twofold: For $q>1$,
\begin{subequations}
\label{eq:Raggio}
\begin{align}
\label{eq:Raggio:subadd}
S_q(\varrho) &\leq S_q(\varrho_1) + S_q(\varrho_2),\\
\label{eq:Raggio:add}
S_q(\varrho) &= S_q(\varrho_1) + S_q(\varrho_2)
\;\Longleftrightarrow\;
\left\{\begin{aligned}
&\varrho = \varrho_1\otimes \varrho_2,\\
&\text{$\varrho_1$ or $\varrho_2$ pure}.
\end{aligned}\right.
\end{align}
\end{subequations}
[Note that for $0<q<1$, there is no definite relation between $S_q(\varrho)$ and $S_q(\varrho_1) + S_q(\varrho_2)$.]
Both statements hold for the classical scenario \cite{RaggioTsallis},
which can be modeled in the quantum scenario
by density matrices being LU equivalent to diagonal ones.
The first part (\ref{eq:Raggio:subadd}) of the conjecture
has been proven by Audenaert \cite{AudenaertTsallisSubadd}.
This guarantees the non-negativity of the functions $g_a$:
For pure states,
$S_q(\pi_a)=S_q(\pi_{bc})\leq S_q(\pi_{b}) + S_q(\pi_{c})$,
so $0\leq \frac12(s_b+s_c-s_a) = g_a$.
On the other hand, (\ref{eq:Raggio:add}) is exactly what we need:
$\cket{\psi}\in\mathcal{V}^{123}$ if and only if
none of its subsystems is pure, which means that
there is subadditivity in a strict sense,
so $0 < \frac12(s_b+s_c-s_a) = g_a$.
The $\Leftarrow$ implication in (\ref{eq:Raggio:add}) holds obviously,
but the whole second part (\ref{eq:Raggio:add}) of the conjecture,
to our knowledge, has not been proven yet.
A very little side result of our work is that
Raggio's conjecture holds
for the very restricted case of two-qubit mixed states which are, at the most, of rank 2.
We note that the von Neumann entropies ($q\to1$) of the subsystems \emph{are not suitable} for the role of the functions $s_a$,
if we want to write the functions $g_a$ by that as $\frac12(s_b+s_c-s_a)$,
since the von Neumann entropy is additive for product states
without any reference to the purity of the subsystems:
\begin{subequations}
\label{eq:NeumannProp}
\begin{align}
\label{eq:NeumannProp:subadd}
S(\varrho) &\leq S(\varrho_1) + S(\varrho_2),\\
\label{eq:NeumannProp:add}
S(\varrho) &= S(\varrho_1) + S(\varrho_2)
\;\Longleftrightarrow\;
\varrho = \varrho_1\otimes \varrho_2.
\end{align}
\end{subequations}
Indeed, it is easy to construct a tripartite state,
which is not separable under any partition, but has vanishing $g_a$ (defined by the von Neumann entropy).
For example, let $\dim\mathcal{H}^a=4$,
then for the state
\begin{equation*}
\cket{\psi}=\frac12\bigl(\cket{000}+\cket{101}+\cket{210}+\cket{311}\bigr)
\end{equation*}
$\pi_{23}=\pi_2\otimes\pi_3$, so $g_1(\psi)=\frac12\bigl(S(\pi_2)+S(\pi_3)-S(\pi_1)\bigr)=0$,
while $S(\pi_1)=\ln4$, and $S(\pi_2)=S(\pi_3)=\ln2$, so none of the subsystems is pure,
the state is genuinely tripartite entangled.
The \emph{R\'enyi entropy} is defined as
\begin{equation}
\label{eq:Renyi}
S^\text{R}_q(\varrho)=\frac{1}{1-q}\ln\bigl[\tr(\varrho^q)\bigr],\qquad q>0,
\end{equation}
which is another generalization of the von Neumann entropy:
$\lim_{q\to1}S^\text{R}_q(\varrho)=S(\varrho)$,
having the advantage of additivity:
\begin{equation}
S^\text{R}_q(\varrho)=S^\text{R}_q(\varrho_1)+S^\text{R}_q(\varrho_2)
\quad\Longleftarrow\quad \varrho = \varrho_1\otimes \varrho_2.
\end{equation}
This is an advantage when entanglement is studied in the asymptotic regime,
when the state is present in multiple copies
and properties are investigated against the number of copies.
Again, this advantage is a disadvantage from our point of view,
the R\'enyi entropies of the subsystems \emph{are not suitable} for the role of the functions $s_a$,
if we want to write the functions $g_a$ by that as $\frac12(s_b+s_c-s_a)$.
Moreover, subadditivity does not hold for R\'enyi entropy,
so the non-negativity of the functions $g_a$ defined by R\'enyi entropies
is not even guaranteed.
(For further properties and references on the quantum entropies,
see, for example, \cite{BengtssonZyczkowski,OhyaPetzQEntr,Petzfdivergence,FuruichiTsallis}.)
\subsection{Pure state indicator functions
for tripartite systems outside the FTS approach}
\label{subsec:GenThreePart:nFTS}
Fortunately, it is easy to define the pure-state indicator functions
of three subsystems of arbitrary dimensions
without the issues of equality in the subadditivity of $q$-entropies.
Again, the basic quantities are the local entropies,
and we use a ``multiplicative'' definition for the functions $g_a$
instead of the ``additive'' one, which came from the FTS approach,
\begin{subequations}
\label{eq:genNewPureLUinvs}
\begin{align}
y(\psi) &= s_1(\psi) + s_2(\psi) + s_3(\psi),\\
s_a(\psi) &= S_q(\pi_a),\\
g_a(\psi) &= s_b(\psi)s_c(\psi),\\
t(\psi) &= s_1(\psi)s_2(\psi)s_3(\psi).
\end{align}
\end{subequations}
These functions obviously reproduce the relevant part of Table \ref{tab:pureSLOCC3},
so, by (\ref{eq:vanishing:y})--(\ref{eq:vanishing:t}),
their convex roof extensions reproduce Table \ref{tab:mixIdent}
for the identification of the PS classes of the tripartite case given in Table \ref{tab:mixClasses}.
The structure of the formulas above give us a hint
for the generalization for arbitrary number of subsystems of arbitrary dimensions:
We just have to play a game with
statements like ``being zero,'' with the logical connectives ``and'' and ``or,''
parallel to the addition and multiplication,
and also parallel to the set-theoretical inclusion, union, and intersection.
\section{Generalizations II. -- Partial separability of multipartite systems}
\label{sec:Gen}
In the previous sections,
we have followed a didactic treatment
in order to illustrate the main concept;
now it is high time to turn to abstract definitions
to handle the PS classification and criteria
for arbitrary number of subsystems of arbitrary dimensions.
For $n$ subsystems, the set of the labels of the subsystems is $L=\{1,2,\dots,n\}$.
Let $\alpha=L_1|L_2|\dots|L_k$ denote a $k$-partite split,
that is, a partition of the labels
into $k$ disjoint non-empty sets $L_r$,
where $L_1\cup L_2\cup\dots\cup L_k=L$.
For two partitions, $\beta$ and $\alpha$,
$\beta$ is contained in $\alpha$,
denoted as $\beta\preceq\alpha$,
if $\alpha$ can be obtained from $\beta$ by joining some---maybe none---of the parts of $\beta$.
This defines a partial order on the partitions.
[It is easy to see from the definition that
$\alpha\preceq\alpha$ (reflexivity);
if $\gamma\preceq\beta$ and $\beta\preceq\alpha$ then $\gamma\preceq\alpha$ (transitivity);
if $\beta\preceq\alpha$ and $\alpha\preceq\beta$ then $\alpha=\beta$ (antisymmetry).]
For example, for the tripartite case $1|2|3\preceq a|bc\preceq 123$.
Since
there is a greatest and a smallest element
(the full $n$-partite split and the trivial partition without split, respectively,
$1|2|\dots|n\preceq\alpha\preceq12\dots n$,)
the set of partitions of $L$ for $\preceq$ forms a bounded lattice.
\subsection{PS subsets in general}
\label{subsec:Gen:PSsubsets}
The first point is the generalization of the PS subsets $\mathcal{D}^\text{\dots}$.
Let $\mathcal{P}^\alpha$ be the set of pure states
which are separable under the partition $\alpha=L_1|L_2|\dots|L_k$,
but not separable under any $\beta\prec\alpha$.
Then the PS subset of \emph{$\alpha$-separable states} is
\begin{subequations}
\label{eq:genDsets}
\begin{equation}
\label{eq:genDsets:alpha}
\mathcal{D}^\alpha = \Conv \bigcup_{\beta\preceq\alpha}\mathcal{P}^\beta,
\end{equation}
which is a special case
of the PS subsets of \emph{$\vs{\alpha}$-separable states}
\begin{equation}
\label{eq:genDsets:alphal}
\mathcal{D}^{\vs{\alpha}} = \Conv \bigcup_{\alpha\in\vs{\alpha}} \bigcup_{\beta\preceq\alpha}\mathcal{P}^\beta
\equiv \Conv \bigcup_{\alpha\in\vs{\alpha}} \mathcal{D}^{\alpha},
\end{equation}
\end{subequations}
with the \emph{label} $\vs{\alpha}$ being an arbitrary \emph{set} of partitions.
[In the writing we omit the $\{\dots\}$ set brackets, as was seen in, e.g., (\ref{eq:Dsets:bcacab}).]
The set of $k$-separable states $\mathcal{D}^\text{$k$-sep}$ arises as a special case
where the $\alpha$ elements of $\vs{\alpha}$ are all the possible $k$-partite splits.
Note that in general, the $\alpha$ partitions are not required to be $k$-partite splits for the same $k$.
This freedom can not be seen in the case of three subsystems.
The $\mathcal{P}^\alpha$ sets are not closed if and only if $\alpha$ is not the full $n$-partite split $1|2|\dots|n$,
but $\cup_{\beta\preceq\alpha}\mathcal{P}^\beta$ is closed,
so the sets $\mathcal{D}^{\vs{\alpha}}$ are closed, and convex by construction.
Note that different $\vs{\alpha}$ labels can give rise to the same $\mathcal{D}^{\vs{\alpha}}$ sets;
in other words,
the $\vs{\alpha}\mapsto \mathcal{D}^{\vs{\alpha}}$ ``labeling map'' defined by (\ref{eq:genDsets:alphal})
is surjective but not injective.
For the full PS classification we need all the possible \emph{different} $\mathcal{D}^{\vs{\alpha}}$ sets.
Because of the nontrivial structure of the lattice of partitions,
obtaining all the different PS sets is also a nontrivial task.
We cannot provide a closed formula for that, but only an algorithm.
Before we do this, we need some constructions.
First, observe that
if $\beta\preceq\alpha$ then $\mathcal{D}^\beta\subseteq\mathcal{D}^\alpha$
[from definition (\ref{eq:genDsets:alpha}), and the transitivity of $\preceq$],
from which it follows that
for the labels $\vs{\beta}$ and $\vs{\alpha}$,
if for every $\beta\in\vs{\beta}$ there is an $\alpha\in\vs{\alpha}$ for which $\beta\preceq\alpha$
then $\mathcal{D}^{\vs{\beta}}\subseteq\mathcal{D}^{\vs{\alpha}}$.
[From definition (\ref{eq:genDsets:alphal}). We will prove the reverse too.]
These observations motivate the extension of
$\preceq$ from the partitions to the labels:
\begin{equation}
\label{eq:labelreldef}
\vs{\beta}\preceq\vs{\alpha}
\qquad\overset{\text{def.}}{\Longleftrightarrow}\qquad
\forall \beta\in\vs{\beta}, \exists \alpha\in\vs{\alpha}:\; \beta\preceq\alpha.
\end{equation}
Note that, at this point,
the relation $\preceq$ on the labels is not a partial order,
only the reflexivity and the transitivity properties hold for that.
The antisymmetry property fails, which is the consequence of
that the definition (\ref{eq:labelreldef}) was motivated by the inclusion of the PS sets,
and different $\vs{\alpha}$s can lead to the same PS set.
Independently of this problem, which will be handled later, the following is true:
\begin{equation}
\label{eq:relationEquiv}
\vs{\beta}\preceq\vs{\alpha} \qquad\Longleftrightarrow\qquad
\mathcal{D}^{\vs{\beta}}\subseteq\mathcal{D}^{\vs{\alpha}}.
\end{equation}
For the proof, see Appendix \ref{appsubsec:Gen:Incl}.
Again, note that the relations $\preceq$ and $\subseteq$
are defined on nonisomorphic sets,
so (\ref{eq:relationEquiv}) does not contradict the fact
that the latter is a partial order while the former is not.
The next step is to define
those labels for which $\preceq$ will be a partial order.
A label $\vs{\alpha}$ is called a \emph{proper label} if
\begin{equation}
\label{eq:properlabel}
\forall \alpha,\alpha'\in\vs{\alpha},\; \alpha\neq\alpha'\quad\Longrightarrow\qquad \alpha\npreceq\alpha'.
\end{equation}
On the set of proper labels, the relation $\preceq$ defined in (\ref{eq:labelreldef})
is a partial order.
For the proof, see Appendix \ref{appsubsec:Gen:pOrder}.
A corollary is that
the set of proper labels for $\preceq$ forms a bounded lattice,
its greatest and smallest elements are the one-element labels
of full $n$-partite split and the trivial partition without split, respectively:
$1|2|\dots|n\preceq\vs{\alpha}\preceq12\dots n$.
Is it true that every PS subset can be labeled by a proper label?
Do different proper labels lead to different PS subsets?
In other words,
is the $\vs{\alpha}\mapsto \mathcal{D}^{\vs{\alpha}}$ ``labelling map''
from the \emph{set of proper labels} to the set of PS subsets
an isomorphism?
The injectivity is the $\Leftarrow$ implication of the observation that
for $\vs{\alpha}$, $\vs{\beta}$, proper labels
\begin{equation}
\label{eq:propInj}
\vs{\beta}=\vs{\alpha}
\qquad\Longleftrightarrow\qquad
\mathcal{D}^{\vs{\beta}}=\mathcal{D}^{\vs{\alpha}}.
\end{equation}
For the proof, see Appendix \ref{appsubsec:Gen:propInj}.
If $\vs{\beta}$ is a label,
then we can obtain a unique proper label from that,
if we drop every $\beta\in\vs{\beta}$ for which there is a $\beta'\in\vs{\beta}$ for which $\beta\preceq\beta'$.
The remaining partitions form a proper label which we denote $\vs{\alpha}$,
and the partitions which have been dropped out form a label which we denote $\vs{\gamma}$.
Then $\vs{\beta}=\vs{\alpha}\vs{\gamma}$, which means the union of labels $\vs{\alpha}$ and $\vs{\gamma}$.
(We omit the union sign too.)
Our next observation is useful for this case.
For the general labels $\vs{\alpha}$ and $\vs{\gamma}$,
\begin{equation}
\label{eq:propPart}
\vs{\gamma}\preceq\vs{\alpha}
\qquad\Longleftrightarrow\qquad
\mathcal{D}^{\vs{\alpha}\vs{\gamma}}=\mathcal{D}^{\vs{\alpha}},
\end{equation}
which means that when we obtain a proper label $\vs{\alpha}$
from a general label $\vs{\beta}$,
as was done above,
both of these lead to the same PS subset.
For the proof, see Appendix \ref{appsubsec:Gen:propPart}.
Since all PS subsets arise from general labels,
the above shows that they arise also from proper labels,
which is the surjectivity of the labeling by proper labels.
Now we have that the set of proper labels is isomorphic to the set of PS subsets.
The former one is much easier to handle.
Moreover, (\ref{eq:relationEquiv}) states now that
the lattice of $\vs{\alpha}$ proper labels with respect to the partial order $\preceq$
is isomorphic to
the lattice of $\mathcal{D}^{\vs{\alpha}}$ PS subsets with respect to the partial order $\subseteq$.
(This lattice is the generalization of the ``inclusion hierarchy'' in Fig.~\ref{fig:incl}.)
To get all the PS subsets, we have to obtain all the proper labels.
A brute-force method for this
is to form all the $\vs{\beta}$ labels (all the subsets of the set of all partitions),
then obtain the proper labels $\vs{\alpha}$ as before
($\vs{\beta}=\vs{\alpha}\vs{\gamma}$)
and keep the different proper labels obtained in this way.
A much more sophisticated algorithm is given in Appendix \ref{appsubsec:Gen:Alg}.
\subsection{PS classes in general}
\label{subsec:Gen:PSclasses}
The second point is the generalization of the PS classes $\mathcal{C}^\text{\dots}$,
which are the possible non-trivial intersections of the PS subsets $\mathcal{D}^\text{\dots}$.
Constructing these requires direct calculations for a given $n$, as was done in Sec.~\ref{subsec:Mixed:Classes}.
Let us divide the set of proper labels into two disjoint subsets, $\vvs{\alpha}$ and $\vvs{\beta}$;
then all the possible intersections of PS subsets can be labeled by such a pair,
which is called \emph{class label}, as
\begin{equation}
\mathcal{C}^{\vvs{\alpha},\vvs{\beta}}=
\bigcap_{\vs{\alpha}\in\vvs{\alpha}} \cmpl{\mathcal{D}^{\vs{\alpha}}}\cap \bigcap_{\vs{\beta}\in\vvs{\beta}}\mathcal{D}^{\vs{\beta}}.
\end{equation}
It can happen that $\mathcal{C}^{\vvs{\alpha},\vvs{\beta}}=\emptyset$ \emph{by construction,}
under which we mean that its emptiness follows from the inclusion hierarchy of PS subsets.
For example, if $\mathcal{D}^{\vs{\beta}}\subseteq\mathcal{D}^{\vs{\alpha}}$
for some $\vs{\alpha}\in\vvs{\alpha}$ and $\vs{\beta}\in\vvs{\beta}$, then the intersection above is identically empty.
The PS(S) classes for three subsystems in Sec.~\ref{subsec:Mixed:Classes}
were obtained by the use of this observation.
In this general framework, this observation is formulated as follows:
\begin{equation*}
\begin{split}
\mathcal{C}^{\vvs{\alpha},\vvs{\beta}}=\emptyset
\;&\overset{\text{(i)}}{\Longleftrightarrow}\;
\cmpl{\bigcup_{\vs{\alpha}\in\vvs{\alpha}} \mathcal{D}^{\vs{\alpha}} } \cap \bigcap_{\vs{\beta}\in\vvs{\beta}}\mathcal{D}^{\vs{\beta}}=\emptyset\\
\;&\overset{\text{(ii)}}{\Longleftrightarrow}\;
\bigcap_{\vs{\beta}\in\vvs{\beta}}\mathcal{D}^{\vs{\beta}} \subseteq \bigcup_{\vs{\alpha}\in\vvs{\alpha}} \mathcal{D}^{\vs{\alpha}}\\
\;&\overset{\text{(iii)}}{\Longleftarrow}\;
\exists \vs{\alpha}\in\vvs{\alpha}, \exists\vs{\beta}\in\vvs{\beta}\;:\;\mathcal{D}^{\vs{\beta}}\subseteq\mathcal{D}^{\vs{\alpha}}\\
\;&\overset{\text{(iv)}}{\Longleftrightarrow}\;
\exists \vs{\alpha}\in\vvs{\alpha}, \exists\vs{\beta}\in\vvs{\beta}\;:\; \vs{\beta}\preceq \vs{\alpha}.
\end{split}
\end{equation*}
[\textit{Equivalence (i)} comes from De Morgan's law $\cmpl{A}\cap\cmpl{B}=\cmpl{A\cup B}$.
\textit{Equivalence (ii)} comes from the identity $B\subseteq A\;\Leftrightarrow\;B\cap\cmpl{A}\equiv B\setminus A=\emptyset$.
\textit{Implication (iii)} comes from $B\subseteq A\;\Rightarrow\; B\cap B'\subseteq A\cup A'$.
\textit{Equivalence (iv)} is (\ref{eq:relationEquiv}).]
Implication (iii) is the point which makes it possible
to formulate the emptiness of PS classes by the use of \emph{labels only}.
It is still a question
whether implication (iii) can be replaced with a stronger one,
which leads to a condition involving only labels again.
(The problem is that we have no interpretations of $\cap$ and $\cup$ in the language of labels.)
\emph{Our first conjecture is that
implication (iii) above is the strongest one which leads to a condition involving only labels.}
Summarizing, we have
\begin{subequations}
\begin{equation}
\label{eq:emptyByConstruction}
\mathcal{C}^{\vvs{\alpha},\vvs{\beta}}=\emptyset
\quad\Longleftarrow\quad
\exists \vs{\alpha}\in\vvs{\alpha}, \exists\vs{\beta}\in\vvs{\beta}\;:\; \vs{\beta}\preceq \vs{\alpha}.
\end{equation}
If the right-hand side holds, then we say, according to the conjecture above,
that $\mathcal{C}^{\vvs{\alpha},\vvs{\beta}}$ is \emph{empty by construction}.
Since this implication is only one way,
it could happen that $\mathcal{C}^{\vvs{\alpha},\vvs{\beta}}=\emptyset$
for a class label $\vvs{\alpha},\vvs{\beta}$ for which the right-hand side does not hold.
However, we think that this cannot happen:
\emph{Our second conjecture is that there is an equivalence in (\ref{eq:emptyByConstruction});}
that is, all the PS classes which are not empty by construction are nonempty.
[This implies the first conjecture above,
but it can still happen that implication (iii) can be replaced by a stronger condition,
so the first conjecture is false.
Then the (\ref{eq:emptyByConstruction}) definition of the emptiness-by-construction changes,
and the second conjecture concerns this new definition.]
The motivation of this is the same as
in the tripartite case, (see at the end of Sec.~\ref{subsec:Mixed:Classes}),
where the PS classes conjectured to be non-empty
were obtained under the same assumptions.
An advantage of the formulation by the labeling constructions
is---roughly speaking---that by the use of that
``we have separated the \emph{algebraic} and the \emph{geometric part}'' of the problem
of nonemptiness of the classes.
At this point, it seems that
we have tackled all the \emph{algebraic} issues of the question,
and these conjectures cannot be proven without the investigation of the \emph{geometry} of $\mathcal{D}$,
more precisely, the geometry of the different kinds of $\mathcal{P}^{\alpha}$ extremal points.
The negation of (\ref{eq:emptyByConstruction}) leads to
\begin{equation}
\label{eq:nonEmptyByConstruction}
\mathcal{C}^{\vvs{\alpha},\vvs{\beta}}\neq\emptyset
\quad\Longrightarrow\quad
\forall \vs{\alpha}\in\vvs{\alpha},
\forall \vs{\beta}\in\vvs{\beta}\;:\; \vs{\beta}\npreceq \vs{\alpha},
\end{equation}
\end{subequations}
so if we obtain all $\vvs{\alpha},\vvs{\beta}$ class-labels
for which the right-hand side of this holds (``\emph{non-emptiness-by-construction}'')
then we will have all the nonempty classes,
together with some empty classes if the second conjecture does not hold.
Because of the nontrivial structure of the lattice of proper labels,
obtaining all the class labels leading to
nonempty-by-construction classes
is also a nontrivial task.
The number of all the partitions of $n$ grows rapidly \cite{oeisA000110,oeisA000041},
which is only the number of the PS subsets of $\alpha$-separability $\mathcal{D}^{\alpha}$.
So the number of all the PS subsets $\mathcal{D}^{\vs{\alpha}}$ grows more rapidly,
and the number of all the PS classes $\mathcal{C}^{\vvs{\alpha},\vvs{\beta}}$ grows even more rapidly.
However, at least, it is finite.
\subsection{Indicator functions in general}
\label{subsec:Gen:Indicators}
The third point is the generalization of the indicator functions.
Let $F:\mathcal{D}(\mathcal{H}^K)\to\field{R}$ be a continuous function
for all $K\subset L$, that is, for all---also composite---subsystems.
The only condition on $F$ is
\begin{equation}
\label{eq:Fprop}
F(\varrho)\geq0, \quad\text{with equality if and only if $\varrho$ is pure},
\end{equation}
for example, the von Neumann entropy or any Tsallis or R\'enyi entropies are suitable.
(Note that the additional requirements of the features of
LU invariance, convexity, Schur-concavity,
additivity, being homogeneous polynomial, etc.,
are only optional;
they will not be needed for the construction.)
For all $K\subset L$ subsystems,
let the following functions on pure states be defined:
\begin{equation}
\label{eq:fK}
\begin{split}
f_K&:\mathcal{P}\longrightarrow \field{R},\\
f_K&(\pi) = F(\pi_K),
\end{split}
\end{equation}
where again, $\pi=\cket{\psi}\bra{\psi}$, and $\pi_K=\tr_{\pcmpl{K}}(\pi)$, with $\pcmpl{K}=L\setminus K$.
Then, for the $k$-partite split $\alpha=L_1|L_2|\dots|L_k$,
$f_{L_r}$ identifies the bipartite split $L_r|\pcmpl{L_r}$, (where $\pcmpl{L_r}=L\setminus L_r$,) as
\begin{equation}
f_{L_r}(\pi)=0 \quad\Longleftrightarrow\quad
\pi\in \bigcup_{\beta\preceq L_r|\pcmpl{L_r}}\mathcal{P}^\beta,
\end{equation}
which is the consequence of (\ref{eq:Fprop}).
Note that $\alpha$ is the greatest element which is smaller than $L_r|\pcmpl{L_r}$ for all $r$.
Then the function
\begin{equation}
\label{eq:genIndicators}
f_\alpha(\pi)=
\sum_{r=1}^k f_{L_r}(\pi),
\end{equation}
has the ability to identify the $k$-partite split $\alpha$ as
\begin{equation}
\label{eq:genIndicatorsdef}
f_\alpha(\pi)=0 \quad\Longleftrightarrow\quad
\pi\in \bigcup_{\beta\preceq \alpha}\mathcal{P}^\beta.
\end{equation}
All non-negative $f_\alpha$ functions satisfying (\ref{eq:genIndicatorsdef})
are called \emph{$\alpha$-indicator functions},
not only the ones defined in (\ref{eq:genIndicators}).
The generalization of (\ref{eq:genIndicators}) for more-than-one partitions,
that is, for all labels, is defined as
\begin{equation}
\label{eq:genIndicatorsl}
f_{\vs{\alpha}}(\pi)= \prod_{\alpha\in\vs{\alpha}} f_{\alpha}(\pi),
\end{equation}
being the generalization of (\ref{eq:genNewPureLUinvs}).
It vanishes exactly for the convenient $\mathcal{P}^{\alpha}$s
\begin{equation}
\label{eq:genIndicatorsldef}
f_{\vs{\alpha}}(\pi)=0
\quad\Longleftrightarrow\quad
\pi\in\bigcup_{\alpha\in\vs{\alpha}} \bigcup_{\beta\preceq\alpha}\mathcal{P}^\beta.
\end{equation}
All non-negative $f_{\vs{\alpha}}$ functions satisfying (\ref{eq:genIndicatorsldef})
are called \emph{$\vs{\alpha}$-indicator functions},
not only the ones defined in (\ref{eq:genIndicatorsl}).
For example, the functions in (\ref{eq:newPureLUinvs})
were not constructed by (\ref{eq:genIndicatorsl}),
but still satisfy (\ref{eq:genIndicatorsldef}),
which is equivalent to the relevant part of Table \ref{tab:pureSLOCC3} for the three-qubit case.
Now, the vanishing of their convex roof extension
\begin{equation*}
\cnvroof{f}_{\vs{\alpha}}(\varrho)=
\min\sum_i p_i f_{\vs{\alpha}}(\pi_i)
\end{equation*}
identifies the PS sets:
\begin{equation}
\label{eq:genvanishing}
\cnvroof{f}_{\vs{\alpha}}(\varrho)=0
\quad\Longleftrightarrow\quad
\varrho\in \mathcal{D}^{\vs{\alpha}},
\end{equation}
being the generalization of (\ref{eq:vanishing}).
Indeed,
$\cnvroof{f}_{\vs{\alpha}}(\varrho)=0$
if and only if there exists a decomposition $\varrho=\sum_ip_i\pi_i$
such that $f_{\vs{\alpha}}(\pi_i)=0$ for all $i$
($f_{\vs{\alpha}}$ is non-negative),
which means that $\pi_i\in\bigcup_{\alpha\in\vs{\alpha}} \bigcup_{\beta\preceq\alpha}\mathcal{P}^\beta$
(\ref{eq:genIndicatorsldef}),
which means that $\varrho\in\mathcal{D}^{\vs{\alpha}}$.
\subsection{Entanglement-monotone indicator functions in general}
\label{subsec:Gen:monIndicators}
There is a possibility to choose indicator functions so that
they obey some axioms of \emph{entanglement measures} \cite{HorodeckiEntMeas}.
The most fundamental one of them is the monotonicity under LOCC \cite{HorodeckiEntMeas,VidalEntMon}.
A $\mu:\mathcal{D}\to\field{R}$ is \emph{(nonincreasing) monotone under LOCC}
if
\begin{subequations}
\begin{equation}
\label{eq:meas:mon}
\mu\bigl(\Lambda(\varrho)\bigr) \leq \mu(\varrho)
\end{equation}
for any LOCC transformation $\Lambda$,
which expresses that entanglement can not increase by the use of local operations
and classical communication.
A $\mu:\mathcal{D}\to\field{R}$ is \emph{nonincreasing on average under LOCC}
if
\begin{equation}
\label{eq:meas:average}
\sum_j p_j \mu(\varrho_j) \leq \mu(\varrho),
\end{equation}
where the LOCC is constituted as $\Lambda=\sum_j \Lambda_j$,
where the $\Lambda_j$s are the parts of the LOCC realizing the outcomes of selective measurements,
and $\varrho_j=\frac1{p_j}\Lambda_j(\varrho)$
with $p_j=\tr[\Lambda_j(\varrho)]$.
This latter condition is stronger than the former one
if the function is \emph{convex}:
\begin{equation}
\label{eq:meas:conv}
\mu\Bigl(\sum_j p_j \varrho_j\Bigr) \leq \sum_j p_j \mu(\varrho_j)
\end{equation}
\end{subequations}
for all ensembles $\{(p_j,\varrho_j)\}$,
which expresses that entanglement can not increase by mixing.
A $\mu:\mathcal{D}\to\field{R}$ is \emph{entanglement-monotone}
if (\ref{eq:meas:average}) and (\ref{eq:meas:conv}) hold for that \cite{VidalEntMon}.
There is common agreement \cite{Horodecki4}
that LOCC-monotonicity (\ref{eq:meas:mon}) is the only necessary postulate
for a function to be an \emph{entanglement measure.}
However, the stronger condition (\ref{eq:meas:average})
is often satisfied too,
and it is often easier to prove.
This holds also for our case.
If $\mu$ is defined only for pure states
$\mu:\mathcal{P}\to\field{R}$,
then only (\ref{eq:meas:average}) makes sense, whose restriction is
\begin{equation}
\label{eq:averagePure}
\sum_i p_i \mu(\pi_i) \leq \mu(\pi).
\end{equation}
Here $\{(p_i,\pi_i)\}$ is the pure ensemble
generated by all the Kraus operators of all $\Lambda_j$s
from the input state $\pi$.
(Not all $\pi_i$ members of the ensemble are accessible physically,
only the outcomes of the LOCC, which are formed by partial mixtures of this ensemble \cite{HorodeckiEntMeas}.
Mathematically, however, we can use the pure ensemble, which makes the construction much simpler.)
If we have such a function $\mu:\mathcal{P}\to\field{R}$,
(\ref{eq:meas:average}) holds for its convex roof extension \cite{VidalEntMon,HorodeckiEntMeas}:
\begin{equation}
\label{eq:averageConvRoof}
\sum_i p_i \mu(\pi_i) \leq \mu(\pi)
\quad\Longrightarrow\quad
\sum_i p_i \cnvroof{\mu}(\varrho_i) \leq \cnvroof{\mu}(\varrho).
\end{equation}
Since the convex roof extension of a function is convex [Eq.~(\ref{eq:meas:conv})]
(moreover, it is the largest convex function taking the same values for pure states as the original function does, \cite{UhlmannOptimalDecomp}),
$\cnvroof{\mu}(\varrho)$ is also entanglement-monotone.
\setcounter{txtitem}{0}
Now, we construct indicator functions which are entanglement-monotone.
(These are denoted $m$ in contrast with the general $f$s.)
This is carried out in four steps.
\refstepcounter{txtitem}\thetxtitem{} It has also been shown in \cite{VidalEntMon,HorodeckiEntMeas} that
if $F:\mathcal{D}(\mathcal{H}^K)\to\field{R}$ is unitary invariant and concave,
then the $f_K$ functions defined in (\ref{eq:fK}) are nonincreasing on average for pure states,
that is, obey (\ref{eq:averagePure}).
So let
\begin{equation}
\label{eq:mK}
m_K(\pi) = M(\pi_K)
\end{equation}
with $M:\mathcal{D}(\mathcal{H}^K)\to\field{R}$ \emph{vanishing if and only if the state is pure,} as before,
but now we demand also \emph{unitary invariance} and \emph{concavity.}
The von Neumann entropy (\ref{eq:Neumann}),
the Tsallis entropies (\ref{eq:Tsallis}) for all $q>0$,
and the R\'enyi entropies (\ref{eq:Renyi}) for all $0<q<1$
are known to be concave \cite{BengtssonZyczkowski}, and all of them are unitary invariant.
\refstepcounter{txtitem}\thetxtitem{} Clearly, the functions obeying (\ref{eq:averagePure}) form a cone;
that is, their sums and multiples by non-negative real numbers also obey (\ref{eq:averagePure}),
so we can conclude that the sums of the functions $m_K$ also obey (\ref{eq:averagePure}).
Here, instead of the original sums in (\ref{eq:genIndicators}),
we introduce the \emph{arithmetic mean} of the $m_{L_r}$ functions,
\begin{equation}
\label{eq:monIndicators}
m_\alpha(\pi)=\frac1k \sum_{r=1}^k m_{L_r}(\pi),
\end{equation}
which are also indicator functions, since they obey (\ref{eq:genIndicatorsdef}).
[The factor $1/k$ is not really important,
but the next step, and
in the three-qubit case $y=1/3(s_1+s_2+s_3)$ from
(\ref{eq:newPureLUinvs:y})--(\ref{eq:newPureLUinvs:sa})
motivate the use of mean values.]
\refstepcounter{txtitem}\thetxtitem{} The only problem we face here is that
the set of functions obeying (\ref{eq:averagePure}) is not closed under multiplication,
which is the case of the $f_{\vs{\alpha}}$ functions of (\ref{eq:genIndicatorsl}).
This is related to the fact that the product of two concave functions is not concave in general.
Moreover, a recent result of Eltschka \textit{et~al.}~suggests
that functions obeying (\ref{eq:averagePure})
cannot be of arbitrarily high degree
(see Theorem I in \cite{EltschkaetalEntMon},
concerning a special class of functions),
so we make a trial of such a combination which does not change the degree,
but still fulfils the conditions (\ref{eq:genIndicatorsldef}).
The \emph{geometric mean} will be proven to be suitable,
which is just a root of the product given in (\ref{eq:genIndicatorsl})
\begin{equation}
\label{eq:monIndicatorsl}
m_{\vs{\alpha}}(\pi)= \Bigl[\prod_{\alpha\in\vs{\alpha}} m_{\alpha}(\pi)\Bigr]^{1/l},
\end{equation}
where $l=\abs{\vs{\alpha}}$, the number of $m_{\alpha}$s in the product.
These functions obviously obey (\ref{eq:genIndicatorsldef})
and also (\ref{eq:averagePure}), which latter is proven in Appendix \ref{appsubsec:Gen:geomMean}.
\refstepcounter{txtitem}\thetxtitem{} Now, the function $m_{\vs{\alpha}}(\pi)$ of (\ref{eq:monIndicatorsl})
is nonincreasing on average for pure states (\ref{eq:averagePure})
so
\begin{equation*}
\cnvroof{m}_{\vs{\alpha}}(\varrho)=
\min\sum_i p_i m_{\vs{\alpha}}(\pi_i)
\end{equation*}
is also nonincreasing on average (\ref{eq:meas:average})
[because of (\ref{eq:averageConvRoof})],
so it is entanglement-monotone
and also identifies the PS subsets
\begin{equation}
\cnvroof{m}_{\vs{\alpha}}(\varrho)=0
\quad\Longleftrightarrow\quad
\varrho\in \mathcal{D}^{\vs{\alpha}},
\end{equation}
as in (\ref{eq:genvanishing}).
\section{Summary and remarks}
\label{sec:Sum}
In this paper we have constructed the complete PS classification of multipartite quantum states
by the PS classes arising from the PS subsets (\ref{eq:genDsets:alphal}),
together with necessary and sufficient conditions for the identification of the PS classes
through the necessary and sufficient conditions for the identification of the PS subsets (\ref{eq:genvanishing})
by indicator functions arising as convex roof extensions of the pure-state indicator functions (\ref{eq:genIndicatorsl}).
The indicator functions can be constructed so as to be entanglement-monotone (Sec.~\ref{subsec:Gen:monIndicators}).
A side track is the PSS classification of three-qubit states,
(Sec.~\ref{sec:Mixed}),
where a different set of indicator functions
has been obtained (\ref{eq:newPureLUinvs}), (\ref{eq:vanishing})
by the use of the FTS approach of three-qubit pure-state entanglement.
\setcounter{txtitem}{0}
Now, we list some remarks and open questions, first about the general case.
\refstepcounter{txtitem}\thetxtitem{} As was mentioned before,
this PS classification scheme is an extension of the classification based on $k$-separability and $\alpha_k$-separability
given by Seevinck and Uffink \cite{SeevinckUffinkMixSep},
which is the extension of the classification dealing only with $\alpha_k$-separability
given by D\"ur and Cirac \cite{DurCiracTarrach3QBMixSep,DurCiracTarrachBMixSep}.
\refstepcounter{txtitem}\thetxtitem{} The nonemptiness of the new classes was only conjectured.
More fully, we could not give necessary and sufficient condition for the nonemptiness of the PS classes
in the purely algebraic language of labels.
Probably, methods from geometry or calculus would be needed to solve this puzzle
(Sec.~\ref{subsec:Gen:PSclasses}).
\refstepcounter{txtitem}\thetxtitem{} In close connection with this,
a further geometry-related conjecture could be drafted about the nonempty classes:
They are of nonzero measure.
It is known in the bipartite case that
the set of separable states is of nonzero measure \cite{Acinetal3QBMixClass,BengtssonZyczkowski},
which can motivate this conjecture.
\refstepcounter{txtitem}\thetxtitem{} We have given also the necessary and sufficient criteria of the classes.
This was done by convex roof extension,
which is a general method for the identification of convex subsets,
having advantages and disadvantages.
\refstepcounter{txtitem}\thetxtitem{} First of all, convex roof extensions are hard to calculate.
However, necessary and sufficient criteria for the detection of convex subsets
seem always to be hard to calculate,
since they always contain an optimization problem,
such as finding a suitable witness,
or positive map \cite{HorodeckiPosMapWitness},
or symmetric extension \cite{Dohertycrit1,Dohertycrit2,Dohertycrit3},
or local spin measurements \cite{SeevinckUffinkMixSep},
or detection vector \cite{HuberkCrit,HuberCrit2},
or local bases \cite{GuhneSevinckCrit}, etc.
(The latter three are for only necessary but not sufficient criteria.
For further references, see the reference lists of \cite{SzalaySepCrit,GuhneTothEntDet}.)
These optimization problems have no solutions in a closed form in general cases.
\refstepcounter{txtitem}\thetxtitem{} Another disadvantage of convex roof extensions
is that this is a ``clearly theoretical'' method,
under which we mean that
the full tomography of the state is needed, then the criteria are applied by computer.
The majority of the other known criteria share this disadvantage.
Exceptions are the criteria by witnesses \cite{HorodeckiPosMapWitness}
and by local spin measurements \cite{SeevinckUffinkMixSep}
(only necessary but not sufficient),
where the criteria can be used in the laboratory, by the tuning of measurement settings.
However, the optimization still has to be done by the measuring apparatus.
\refstepcounter{txtitem}\thetxtitem{} An advantage of the convex roof extension
is that it works independently of the dimensions of the subsystems,
so the criteria by that work for arbitrary dimensions.
However, the numerical optimization depends strongly on the rank of the state,
which can be high if the dimension is high,
resulting in slow convergence.
\refstepcounter{txtitem}\thetxtitem{} The greatest advantage of the criteria given by convex roof constructions---at least for us---is that
they have a very transparent structure;
they reflect clearly the structure of the PS classes by construction [see (\ref{eq:genvanishing})].
Now, we turn to some remarks about the three-qubit case,
which, although particular, is very important.
\refstepcounter{txtitem}\thetxtitem{} First, note that the FTS approach of three-qubit entanglement
\cite{BorstenetalFreudenthal3QBEnt}
is coming from the famous Black Hole/Qubit Correspondence \cite{BorstenDuffLevayBHQB}.
The FTS approach has turned out to be fruitful also in some other
fields of quantum entanglement \cite{PeterPetiFTS,PetiPeterFTS}.
There are some advantages of the FTS approach in the three-qubit case,
although, as we have seen, criteria for the PS subsets can be obtained without the use of that.
\refstepcounter{txtitem}\thetxtitem{} Since the convex roof extensions of polynomials can be known to be semialgebraic functions \cite{PetiPriv,ChenDjokovicSemialg},
it can be useful to use LU-invariant homogeneous polynomials for the identification of the classes.
Then we have polynomials of this kind
from (\ref{eq:newPureLUinvs}) coming from the FTS approach,
and from (\ref{eq:genNewPureLUinvs}) with the Tsallis entropy for $q=2$ coming from the general constructions.
The former ones are of lower degree, which may lead to more simple convex roof extensions.
\refstepcounter{txtitem}\thetxtitem{} Moreover,
this holds also for the functions $g_a$ in the general tripartite case
if Raggio's conjecture holds (Sec.~\ref{subsec:GenThreePart:FTS}).
\refstepcounter{txtitem}\thetxtitem{} A little side result of our work is that
Raggio's conjecture holds
for two-qubit mixed states which are, at the most, of rank $2$.
\refstepcounter{txtitem}\thetxtitem{} An interesting question is
as to whether all pure-state indicator functions can be obtained
without products of local entropies,
but using only linear combinations of them.
\refstepcounter{txtitem}\thetxtitem{} We note that there are also recent attempts to study the general structure
of LU invariant homogeneous polynomials \cite{HWLUA,HWWLUA,PetiLUA1,PetiLUA23,SzDeg6}.
Looking for convex roof extensions in the language of LU-invariant polynomials
would be an interesting direction of research.
\refstepcounter{txtitem}\thetxtitem{} As a disadvantage of the FTS approach,
we have to mention that
some of the indicator functions coming from the FTS approach
are not nonincreasing on average (\ref{eq:averagePure}),
namely $g_a$ and $t$ given in (\ref{eq:newPureLUinvs:ga}) and (\ref{eq:newPureLUinvs:t}).
[Counterexamples for (\ref{eq:averagePure}) can be constructed for these functions by direct calculation.]
Finally, we would like to
summarize some
arguments for the relevance of the extension of the
Seevinck-Uffink classification.
\refstepcounter{txtitem}\thetxtitem{} We can get back the classification given by Seevinck and Uffink
if we simply forget about the sets $\varrho\in\mathcal{D}^{\twoprt{b|ac}{c|ab}}$,
and the functions $\cnvroof{g}_a(\varrho)$.
However, the appearance of the $g_a(\psi)$ polynomials is natural
in the light of the formulas (\ref{eq:newPureLUinvs:y}), (\ref{eq:newPureLUinvs:sa}), and (\ref{eq:newPureLUinvs:ga}).
This motivates the introduction of the sets $\varrho\in\mathcal{D}^{\twoprt{b|ac}{c|ab}}$
to the classification.
\refstepcounter{txtitem}\thetxtitem{} The $g_a$ functions are interesting in themselves (see Appendix \ref{appsubsec:explicit:WoottersConc}).
For all non-GHZ vectors, they coincide with
the Wootters concurrence-squared of two-qubit subsystems (\ref{eq:Conc3QB}).
However, note that the Wootters concurrences of two-qubit subsystems
are not suitable for being indicator functions,
since they can be zero also for GHZ-type vectors,
so they do not fulfill the last row of $g_a$ columns of Table \ref{tab:pureSLOCC3}.
For example, for the usual GHZ state (\ref{eq:GHZ}),
the Wootters concurrences of two-qubit subsystems are zero.
\refstepcounter{txtitem}\thetxtitem{} In Sec.~\ref{sec:Xmpl}
we have shown states
which are definitely in classes that are different in the extended classification.
This is another reason for using also the sets $\varrho\in\mathcal{D}^{\twoprt{b|ac}{c|ab}}$
in the classification.
In closing, there is an important question, which can be of research interest as well.
\refstepcounter{txtitem}\thetxtitem{} The PS classification is about the following issue:
``From which kinds of pure entangled states can a given state be \emph{mixed}?''
Another issue,
which is equivalently important from the point of view of quantum computation
but which we have not dealt with, is
``Which kinds of pure entangled states can be \emph{distilled out} from a given state?''
What can be said about the latter?
\begin{acknowledgments}
We thank P\'eter L\'evay and P\'eter Vrana for helpful discussions
and D\'enes Petz for the reference of some papers.
This work was supported by the New Hungary Development Plan
(Project ID: T\'AMOP-4.2.1/B-09/1/KMR-2010-0002).
\end{acknowledgments}
|
{
"timestamp": "2012-09-27T02:03:27",
"yymm": "1206",
"arxiv_id": "1206.6253",
"language": "en",
"url": "https://arxiv.org/abs/1206.6253"
}
|
\section{INTRODUCTION}\label{sec:intro}
In 1973 Capper and Duff~\cite{1,2,3,4} discovered that the correlation function of
the energy-momentum tensor for a massless particle theory
\begin{equation}
\Pi _{\mu \nu \rho \sigma }(p)
=\int d^{u}xe^{ipx}\langle T_{\mu \nu }(x) T_{\rho \sigma }(0)\rangle
| _{g_{\mu \nu } =\delta _{\mu \nu }}
\label{1.1}
\end{equation}
although it obeyed as classically expected the conservation of 4-momentum Ward-identities
\begin{equation}
p^{\mu }\Pi _{\mu \nu \rho \sigma }(p)=0
\end{equation}
does not obey the expected tracelessness of the energy-momentum tensor, since indeed the finite part is not ``traceless'' i.e.\
\begin{equation}
\Pi ^{\mu }_{\> \> \mu \rho \sigma }(\mbox{finite part})\neq 0.
\end{equation}
Classically of course one expects that for a theory of massless particles the energy-momentum tensor should be traceless $T^{\mu }_{\> \> \mu }=0$ and so obtaining $T^{\mu }_{\> \> \mu }\neq 0$ in any correlation function signals an \underline{anomaly}.
In the present work we hope to throw some light onto the question of how such an anomaly comes about physically. We have chosen as our technology to extract the $T^{\mu }_{\> \> \mu }$ which is the Hamiltonian even if there is no interaction between the massless particles but they just are in some gravitational non-trivial background that can act on $T_{\rho \sigma }(x)$ there appears this effect of the trace of the $T_{\mu \nu }$ not being zero. This effect is referred to as the Weyl-anomaly.
Really the reason for this $T^{\mu }_{\> \> \mu }\neq 0$ being an ``anomaly'' is that a theory with only massless particles formally classically obeys symmetry under scaling and that $T^{\mu }_{\> \> \mu }=0$ can be extracted from the requirement of scaling invariance (Dilatation symmetry = scaling symmetry.)
The generator for dilatation symmetry is
\begin{equation}
D=\int x^{\mu }T_{\mu 0}d^{3}\vec{x}
\end{equation}
or the current for this $D$ is
\begin{equation}
j_{D \mu }(x)=x^{\rho}T_{\rho \mu }(x)
\end{equation}
In fact we shall use as this background --- which could act on the $T_{\mu \nu}$ ---
\begin{equation}
\eta _{\mu \nu }\longrightarrow g_{\mu \nu}=\eta ' _{\mu \nu}
=e^{\Omega }\eta _{\mu \nu }
\end{equation}
where $\Omega $ has been used as an ansatz for the Weyl transformation function.
In the present article we shall only perform our calculation in 1+1 dimensions --- but with a Euclideanized technique so that we are really using 2+0 dimensions ---.
We shall now consider the vacuum to vacuum transition in the background modified by some Weyl transformation.
In the following section 2 fermion and Dirac Sea~\cite{5} we shall introduce some notation for fermions in a background gravitational field obtained from that space-time by a Weyl transformation and we shall describe the Dirac sea. In \underline{section 3} we then prepare for how to extract the vacuum expectation value of the trace $T^{\mu }_{\> \> \mu }$ of the energy momentum tensor by varying the vacuum to vacuum $S$-matrix element in a background metric $e^{2\Omega }\eta _{\mu \nu}$. In \underline{section 4} we describe our cut-off procedure by cutting-off the (negative) high energy part of the Dirac sea by compensating its contribution to say, energy and momentum by means of a ghost-like particles, which is really a massive boson following a fermion like equation of motion. Using this cut-off procedure we compute in \underline{section 5} the correlation function for the Weyl transformation background field $\Omega (x)$ which according to section 3 is required to achieve for instance the Weyl anomaly expression for $T^{\mu }_{\> \> \mu}$.
In section 6 we then conclude and outlook resume.
\section{NOTATIONS AND DIRAC SEA}\label{sec:notation}
The model we shall consider is a two component --- thus Dirac --- fermion in a gravitational field background which is though assumed to be conformally flat.
It is in reality given by a Weyl transformation having acted on a flat space-time. I.e.\ the metric is of the form
\begin{equation}
g_{\mu \nu }=e^{2\Omega }\eta _{\mu \nu }.
\end{equation}
Here $\eta _{\mu \nu }$ is the flat metric. We then consider a two component complex fermion
\begin{equation}
\Psi =\begin{pmatrix} \overline{\psi} \\ \psi \end{pmatrix}
\end{equation}
on the Euclidean 2 dimensional space with coordinates \ $\vec{x}=(x^{0}, x^{1})=(t, x)$, so that \\ $-\infty < t < \infty $ \ and \ $0 \le x^{1} < 2\pi $. We in fact require periodicity with period $2\pi$ and consider the cylindrical space-time $S^{1} \times R$.
In the present paper we adopt notations such that Roman indices $i,j,\cdots$ take the component of the flat space while Greek ones $\mu , \nu \cdots $ take those of the curved space. The diffeomorphism invariant action reads
\begin{eqnarray}
S&=&\frac{1}{2\pi }\int d^{2}\vec{x} \sqrt{g} \Psi ^{+}(\vec{x})\gamma ^{0}\gamma ^{i}e^{\mu}_{i}\times \bigtriangledown _{\mu}\Psi (\vec{x}) \nonumber\\
&=&\frac{1}{4\pi }\int d^{2}\vec{x} \sqrt{g} \{ \Psi ^{+}(\vec{x})\gamma ^{0}\gamma ^{i}\times e^{\mu}_{i}(\vec{x}) \bigtriangledown _{\mu}\Psi (\vec{x}) \nonumber\\
&-&\bigtriangledown _{\mu }\Psi ^{+}(\vec{x})\gamma ^{0}\gamma ^{i}e^{\mu}_{i}(\vec{x}) \Psi (\vec{x})\}
\label{2.3}
\end{eqnarray}
By making use of the diffeomorphism the metric tensor $g_{\mu \nu }$ can be made into conformal flat form
\begin{equation}
g_{\mu \nu }=e^{2\Omega (\vec{x})}\eta _{\mu \nu }
\end{equation}
Hereafter we assume that as $t \to \pm \infty $ space-time becomes flat, i.e.\ $ \lim_{t \to {\pm \infty} } g_{\mu \nu} = \eta _{\mu \nu }$ and
\begin{equation}
\lim_{t \to {\pm \infty} } \Omega (\vec{x}) = 0.
\label{2.5}
\end{equation}
Since we deal with conformal transformation we introduce the complex coordinate
\begin{eqnarray}
z&=&e^{x^0+ix^1}, \nonumber\\
\overline{z}&=&e^{x^0-ix^1}.
\end{eqnarray}
That purpose we need to introduce zweibeins $e^{\mu }_{i}(x)$ --- or their inverses $f^{i}_{\mu }(x)$ ---. We may choose to specify these zweibeins to be diagonal as a ``gauge choice'' so that
\begin{equation}
e^{0}_{1}=e^{1}_{0}
\end{equation}
and the requirement
\begin{equation}
\eta _{ij}e^{i}_{\mu }e^{j}_{\nu }=g_{\mu \nu }=e^{2\Omega }\eta _{\mu \nu}
\label{2.8}
\end{equation}
would then lead to the choice
\begin{equation}
e^{1}_{1}=e^{0}_{0}=e^{\Omega }
\end{equation}
and
\begin{equation}
f^{1}_{1}=f^{0}_{0}=e^{-\Omega }
\end{equation}
for the inverse $f^{\mu }_{i}$.
Then we can write the action
\begin{eqnarray}
S&=&\frac{1}{2\pi }\int d^{2}\vec{x} \sqrt{g} \Psi ^{+}(\vec{x})\gamma ^{0}\gamma ^{i}e^{\mu}_{i}\times \bigtriangledown _{\mu}\Psi (\vec{x}) \nonumber\\
&=&\frac{1}{4\pi }\int d^{2}\vec{x} \sqrt{g} \{ \Psi ^{+}(\vec{x})\gamma ^{0}\gamma ^{i}\times e^{\mu}_{i}(\vec{x}) \bigtriangledown _{\mu}\Psi (\vec{x}) \nonumber\\
&-&\bigtriangledown _{\mu }\Psi ^{+}(\vec{x})\gamma ^{0}\gamma ^{i}e^{\mu}_{i}(\vec{x}) \Psi (\vec{x})\}.
\end{eqnarray}
We may rewrite this action into the form
\begin{eqnarray}
S&=&\frac{1}{4\pi }\int d^{2}\vec{x} e^{2\Omega }\cdot
\Bigl\{(\overline{\psi }^{+}, \psi )(\vec{x})e^{-\Omega }
\begin{pmatrix} \bigtriangledown _{2}+i\bigtriangledown _{1} & 0 \\ 0 & \bigtriangledown _{2}-i\bigtriangledown _{1} \end{pmatrix}
\begin{pmatrix} \overline{\psi } \vec{x} \\ \psi \vec{x} \end{pmatrix} \nonumber\\
&-&(\bigtriangledown _{2}+i\bigtriangledown _{1})\overline{\psi} ^{+}\vec{x}e^{-\Omega }\overline{\psi} (\vec{x}) \nonumber\\
&-&(\bigtriangledown _{2}-i\bigtriangledown _{1}) \psi e^{-\Omega }\psi
(\vec{x}) \Bigr\} \nonumber\\
&=&\frac{1}{4\pi }\int d^{2}\vec{x}\Bigl(\overline{\psi }^{+} e^{\Omega }(\bigtriangledown _{2}+i\bigtriangledown _{1})\overline{\psi } \nonumber\\
&+& \psi ^{+}e^{\Omega }(\bigtriangledown _{2}-i\bigtriangledown _{1})\psi \nonumber\\
&-&\bigl((\bigtriangledown _{2}+i\bigtriangledown _{1})\overline{\psi }^{+}\bigr) e^{+\Omega }\overline{\psi } \nonumber\\
&+& (\bigtriangledown _{2}-i\bigtriangledown _{1})\psi ^{+} e^{\Omega }\psi \Bigr)
\label{2.12}
\end{eqnarray}
Here we have used the Weyl representation for $\gamma ^{\mu }$-matrices
\begin{eqnarray}
\gamma ^{1}=\begin{pmatrix} 0 & i \\ -i & 0 \end{pmatrix} \nonumber\\
\gamma ^{2}=\begin{pmatrix} 0 & -1 \\ -1 & 0 \end{pmatrix}
\end{eqnarray}
and the above expressions of zweibeins and metric in terms of $\Omega (\vec{x})$:
\begin{eqnarray}
f^{\mu }_{i}&=&\delta ^{\mu }_{i}e^{-\Omega } \nonumber\\
e^{i}_{\mu }&=&\delta ^{i}_{\mu }\cdot e^{\Omega } \nonumber\\
\mbox{and}\ \ g_{\mu \nu}&=&e^{2\Omega }\eta _{\mu \nu}.
\end{eqnarray}
We may rewrite the action (\ref{2.12}) into the form
\begin{eqnarray}
S&=&\frac{1}{4\pi }\int d^{2}\vec{x} \Bigl[ \bigl\{e^{\frac{1}{2}\Omega}\psi ^{+} \bigr\}
(\bigtriangledown _{2}+i\bigtriangledown _{1})(e^{\frac{1}{2}\Omega}\psi)\nonumber\\
&-&(\bigtriangledown _{2}+i\bigtriangledown _{1})
(e^{\frac{1}{2} \Omega}\psi^{+})
\times (e^{\frac{1}{2} \Omega}\psi )+ \bigl\{e^{\frac{1}{2}\Omega} \overline{\psi} ^{+} \bigr\}
(\bigtriangledown _{2}-i\bigtriangledown _{1})\bigl\{e^{\frac{1}{2}\Omega }\overline{\psi}\bigr\} \nonumber\\
&-&(\bigtriangledown _{2}-i\bigtriangledown _{1}) (e^{\frac{1}{2} \Omega}\overline{\psi}^{+} )e^{\frac{1}{2} \Omega}\overline{\psi}\Bigr]
\label{2.15}
\end{eqnarray}
Here the prefactors of $\psi $ and $\overline{\psi }$ are
\begin{eqnarray}
e^{\frac{1}{2} \Omega} \nonumber\\
\mbox{and} \ \ e^{\frac{1}{2} \Omega}
\end{eqnarray}
respectively.
In this form the action is immediately seen to possess invariance under further Weyl transformation given by
\begin{eqnarray}
\Omega (\vec{x}) \to \Omega' (\vec{x}) = \Omega (\vec{x}) + E(\vec{x})\nonumber\\
\psi (\vec{x}) \to \psi' (\vec{x}) = e^{-\frac{1}{2} E(\vec{x})}\psi (\vec{x}) \nonumber\\
\overline{\psi } (\vec{x}) \to \overline{\psi }'(\vec{x}) = e^{-\frac{1}{2} E(\vec{x})} \overline{\psi }(\vec{x})
\end{eqnarray}
Here $E(\vec{x})$ is the function describing the Weyl transformation.
Noticing that in the equation (\ref{2.15}) the derivatives from Leibnitz rule acting on $\Omega $ drop out (cancel) we see that the equation of motion would be like if $\Omega $ were constant and in any case we find the equations of motion
\begin{eqnarray}
(\bigtriangledown _{2}+i\bigtriangledown _{1})(e^{\frac{1}{2}\Omega}\psi)=0\nonumber\\
(\bigtriangledown _{2}-i\bigtriangledown _{1})(e^{\frac{1}{2}\Omega}\psi ^{+})=0\nonumber\\
(\bigtriangledown _{2}-i\bigtriangledown _{1})(e^{\frac{1}{2}\Omega}\overline{\psi })=0\nonumber\\
(\bigtriangledown _{2}-i\bigtriangledown _{1})(e^{\frac{1}{2}\Omega}\overline{\psi} ^{+})=0
\end{eqnarray}
if you vary independently $\psi, \ \psi ^{+}, \ \overline{\psi } \ \mbox{and} \ \overline{\psi} ^{+}$. In any case you easily get by defining the ``tilded'' fields
\begin{eqnarray}
\widetilde{\psi } (\vec{x})&=&e^{\frac{1}{2}\Omega (\vec{x}) }\psi (\vec{x}) \nonumber\\
\widetilde{\psi }^{+} (\vec{x})&=&e^{\frac{1}{2}\Omega (\vec{x}) }\psi^{+} (\vec{x}) \nonumber\\
\widetilde{\overline{\psi }}(\vec{x})&=&e^{\frac{1}{2}\Omega (\vec{x}) }\overline{\psi } (\vec{x})
\nonumber\\
\widetilde{\overline{\psi }}^{+}(\vec{x})&=&e^{\frac{1}{2}\Omega (\vec{x}) }\overline{\psi }^{+} (\vec{x})
\label{2.19}
\end{eqnarray}
the (seemingly) $\Omega $-independent action
\begin{eqnarray}
S&=&\frac{1}{4\pi }\int d^{2}\vec{x} \Bigl[ \widetilde{\psi }^{+}(\bigtriangledown _{2}+i\bigtriangledown _{1})\widetilde{\psi }-\bigl( (\bigtriangledown _{2}+i\bigtriangledown _{1})\widetilde{\psi }^{+}\bigr)\cdot \widetilde{\psi }\nonumber\\
&+&\widetilde{\overline{\psi }}^{+} (\bigtriangledown _{2}-i\bigtriangledown _{1})\widetilde{\overline{\psi }}-\bigl( (\bigtriangledown _{2}-i\bigtriangledown _{1})\widetilde{\overline{\psi }}^{+}\bigr)\cdot \overline{\psi }\Bigr].
\label{2.20}
\end{eqnarray}
The a priori covariant derivatives $\bigtriangledown_{\mu }$ are when acting on the effective scalar --- the fermion $\psi , \ \overline{\psi }$ etc.\ and $\Omega $ --- just the usual derivative (operators) with respect to
\begin{equation}
x^{\mu }, \ \bigtriangledown_{\mu }\sim \partial_{\mu}, \\ \bigtriangledown _{2}+i\bigtriangledown _{1}=\partial_{2}+i\partial_{1} \ \mbox{etc}.
\end{equation}
If we choose as new variables
\begin{eqnarray}
\ln{z}&=&x^{2}+ix^{1} \nonumber\\
\mbox{and} \ \ \ln{\overline{z}}&=&x^{2}-ix^{1}
\label{2.22}
\end{eqnarray}
and then
\begin{eqnarray}
\bigtriangledown _{2}+i\bigtriangledown _{1}&=&\frac{\partial}{\partial \ln{z}} \nonumber\\
\bigtriangledown _{2}-i\bigtriangledown _{1}&=&\frac{\partial}{\partial \ln{\overline{z}}}.
\end{eqnarray}
The equations of motion for $\widetilde{\overline{\psi }}$ and $\widetilde{\psi }$ becomes that they \emph{only} depend on respectively $\ln{z}=x^{2}+ix^{1}$ and $\ln{\overline{z}}=x^{2}-ix^{1}$.
We simply find right and left mover fields respectively $\widetilde{\psi }$ and $\widetilde{\overline{\psi }}$ and with the compactification of the space coordinate $x^{1}$ to have period
\begin{equation}
x^{1}\simeq x^{1}+2\pi
\end{equation}
We get the quantization of momentum $p^{1}$ and thus energy to
\begin{equation}
p^{1}=n \ \ \ (n:\mbox{integer}) \nonumber
\end{equation}
In fact
\begin{equation}
E=\pm p^{1}
\end{equation}
Filling the Dirac Sea as here formulated in $\widetilde{\psi }$ , $\widetilde{\overline{\psi }}$-notation thus looks simply like filling states in Fig.~\ref{fig:1} into the negative energy parts of the two line-dispersion laws.
\begin{figure}[htb]
\begin{center}
\begin{picture}(200,190)
\put(10,100){\vector(1,0){180}}
\put(100,10){\vector(0,1){180}}
\put(193,97){$p$}
\put(103,190){$E$}
\multiput(12.5,12.5)(15,15){12}{\line(1,1){10}}
\multiput(12.5,187.5)(15,-15){12}{\line(1,-1){10}}
\multiput(100,100)(-7.5,-7.5){12}{\circle*{4}}
\multiput(100,100)(7.5,-7.5){12}{\circle*{4}}
\put(135,150){III}
\put(45,60){IV}
\put(55,150){I}
\put(145,60){II}
\put(25,175){left mover}
\put(125,175){right mover}
\end{picture}
\end{center}
\caption[Dirac Sea]{The particle distribution of the Dirac Sea} \label{fig:1} \end{figure}
According to say equation (\ref{2.12}) the canonically conjugate to $\overline{\psi }$ is
\begin{equation}
\frac{\partial L}{\partial (\partial_{2}\overline{\psi })}
=\frac{1}{4\pi }e^{\Omega }\overline{\psi }^{+}
\end{equation}
where
\begin{eqnarray}
L&=&\frac{1}{4\pi }\bigl( \overline{\psi }^{+}e^{\Omega }(\bigtriangledown _{2}+i\bigtriangledown _{1})\overline{\psi }\bigr)+{\psi }^{+}e^{\Omega }(\bigtriangledown _{2}-i\bigtriangledown _{1}){\psi }\nonumber\\
&-&\bigl( (\bigtriangledown _{2}+i\bigtriangledown _{1})\overline{\psi }^{+}\bigr)e^{\Omega }\overline{\psi }+(\bigtriangledown _{2}-i\bigtriangledown _{1}){\psi }^{+}e^{\Omega }{\psi }.
\end{eqnarray}
From there we then obtain the anti-commutation relations for the second quantized fields
\begin{eqnarray}
\bigl\{ \psi (t, x^{1}), \psi^{+} (t, {x'^{1}})\bigr\} &=&e^{-\Omega(t, x^{1}) }\delta (x^{1}-{x'^{1}})\nonumber\\
\bigl\{ \overline{\psi} (t, x^{1}), \overline{\psi}^{+} (t, {x'}^{1})\bigr\}&=&e^{-\Omega(t, x^{1}) }\delta (x^{1}-{x'}^{1})
\end{eqnarray}
These anti-commutation rules match with the $\Omega$-independent rules for the tilted fields $\widetilde{\Psi },\ \widetilde{\overline{\Psi }} $ where they are defined from equation (\ref{2.19})
\begin{eqnarray}
\widetilde{\Psi }(\vec{x})&=&e^{\frac{1}{2}\Omega (\vec{x})}{\Psi }(\vec{x})\nonumber\\
\widetilde{\overline{\Psi }}(\vec{x})&=&e^{\frac{1}{2}\Omega (\vec{x})}\overline{\Psi }(\vec{x})\nonumber\\
\widetilde{\Psi }^{+}(\vec{x})&=&e^{\frac{1}{2}\Omega (\vec{x})}{\Psi }^{+}(\vec{x})\nonumber\\
\widetilde{\overline{\Psi }}^{+}(\vec{x})&=&e^{\frac{1}{2}\Omega (\vec{x})}\overline{\Psi }^{+}(\vec{x}),
\end{eqnarray}
namely
\begin{eqnarray}
\bigl\{ \widetilde{\Psi }(t, x^{1}), \ \widetilde{\Psi }^{+}(t, {x'}^{1})\bigr\}&=&\delta (x^{1}-{x^{1}}')\nonumber\\
\bigl\{ \widetilde{\overline{\Psi }}(t, x^{1}), \ \widetilde{\overline{\Psi }}^{+}(t, {x'}^{1})\bigr\}&=&\delta (x^{1}-{x'}^{1}).
\end{eqnarray}
In the completely usual way we may expand these second quantized fields on annihilation and creation operators $b_{n}$ and $b^{+}_{n}$ for momentum $p^{1}=n$,
\begin{eqnarray}
e^{-\frac{1}{2}\Omega (t,x)}\Psi (t,x^{1})
=\widetilde{\Psi }(t,x^{1})&=&\sum _{n=-\infty}^{\infty}\widetilde{b}_{n}e^{(-x^{2}-ix^{1})\cdot n} \nonumber\\
&=&\sum _{n=-\infty}^{\infty}\frac{\widetilde{b}_{n}}{z^{n}}
\end{eqnarray}
and
\begin{eqnarray}
e^{-\frac{1}{2}\Omega (t,x)}\overline{\Psi} (t,x^{1})
=\widetilde{\overline{\Psi }}(t,x^{1})&=&\sum _{n=-\infty}^{\infty}\widetilde{\overline{b}}_{n}e^{(-x^{2}-ix^{1})\cdot n} \nonumber\\
&=&\sum _{n=-\infty}^{\infty}\frac{\widetilde{\overline{b}}_{n}}{z^{n}}
\end{eqnarray}
Here the $\widetilde{b}_{n}$ and $\widetilde{\overline{b}}_{n}$ have the usual anti-commutation relations
\begin{eqnarray}
\bigl\{ \widetilde{b}_{n},\widetilde{b}^{+}_{m} \bigr\}=\delta _{nm}\nonumber\\
\bigl\{ \widetilde{\overline{b}}_{n},\widetilde{\overline{b}}^{+}_{m} \bigr\}=\delta _{nm}
\end{eqnarray}
They anti-commute if we ask $b$ with $b$ rather than with $b^{+}$ or $\{b,\overline{b}^{(+)}\}=0$.
A priori we should put a bracket with $\Omega$ i.e.\ $[\Omega]$ onto all these creation and annihilation operators $\widetilde{b}^{+}_{n}$, $\widetilde{\overline{b}}^{+}_{n}$, $\widetilde{b}_{n}$ and $\widetilde{\overline{b}}_{n}$ so as to write rather $\widetilde{b}^{[\Omega]+}_{n}$, $\widetilde{\overline{b}}^{+[\Omega]}_{n}$, $\widetilde{b}^{[\Omega]}_{n}$ and $\widetilde{\overline{b}}^{[\Omega]}_{n}$. However, since they have the same properties and could be identified if we insisted it is not really needed. In this creation notation the second quantized Hamiltonian comes to look
\begin{eqnarray}
H&=&\frac{1}{4\pi }\sum _{n=-\infty}^{\infty}\bigl( n+\frac{1}{2}\bigr) \bigl( \widetilde{b}^{+[\Omega]}_{n} \widetilde{b}^{[\Omega]}_{n}-\widetilde{b}^{[\Omega]}_{n} \widetilde{b}^{[\Omega]+}_{n} + \widetilde{\overline{b}}^{+[\Omega]}_{n}\widetilde{\overline{b}}^{[\Omega]}_{n}-\widetilde{\overline{b}}^{[\Omega]}_{n}\widetilde{\overline{b}}^{[\Omega]+}_{n}\bigr).
\label{2.34}
\end{eqnarray}
Corresponding to this Hamiltonian we can then construct the Dirac Sea by filling the single particle states with negative energies. Using that for the ``right moving'' $\Psi (t,x^{1})$ or $\widetilde{\Psi} (t,x^{1})$
\begin{equation}
E=p^{1}
\end{equation}
while for the ``left moving'' $\overline{\Psi} (t,x^{1})$ or $\widetilde{\overline{\Psi}}(t,x^{1})$ we have
\begin{equation}
E=-p^{1}
\end{equation}
we construct the Dirac Sea vacuum as
\begin{equation}
|\mbox{sea}>=\Pi _{n\ge 0}\widetilde{\overline{b}}^{[\Omega]+}_{n}|\overline{0}>\otimes \Pi _{m< 0}\widetilde{b}^{[\Omega]+}_{m}|0>
\end{equation}
where $|\overline{0}>$ and $|0>$ represent the ``fundamental'' vacua in the bar and no bar sectors in which there is not even the Dirac Sea(s). (see Fig.\ref{fig:1})
\section{SIGNIFICANCE OF THE DIRAC SEA}\label{sec:significance}
It is the philosophy of the present article to think of e.g.\ vacuum expectation value of $T_{\mu \nu }(x)$ (in the vacuum with the Dirac sea) as being due to this Dirac sea. To get meaningful results for a Dirac sea it is however needed to regularize it in some way or another so as to obtain finite though cut-off dependent e.g.\ energy density rather than just divergence.
The technique which we also describe in a slightly simplified form in the following section consists in inventing a series of massive particle species some of which are ``ghosts'' in the sense of being counted negatively when it comes to the constructions of, say, $T_{\mu \nu }$ from their Dirac seas
~\cite{6}~\cite{7}~\cite{8}. ``Negatively counted'' species may be a better name since ``ghosts'' is used for something similar but not exactly the same. That is to say that in our regularization procedure we introduce two series of extra species with different masses. Then the idea is to let some of these extra species count negatively --- in the sense that their contributions to energy momentum etc.\ (say particle number charge) are counted negatively--- while others are counted just as usual fermions.
The basic idea now is to arrange the masses for these extra species so as to cancel out the contributions from the numerically large single particle energies so that the combined system of species together with the original fermion gets cut off. The typical mass of the extra species come to function as the cut-off scale $\Lambda $.
In order to get the main (quadratic) divergence cancelled we need to have, including the original fermion, just equally many species ``counted negatively'' as counted positively.
In order to get the logarithmic divergence cancelled it is needed to arrange that the coefficient to the term of the form $\frac{1}{n}$ in the large $n$ expansion of the energy $E_{n}$ of the momentum $n\sim p$ level for the various introduced extra species plus the original fermion cancel out. Since the energy $E_{n}$ of a particle with mass $m$ and momentum $n=p$ is large $n$ expanded as
\begin{equation}
E_{n}=\pm \sqrt{m^{2}+n^{2}}\approx \pm \bigl(n+ \frac{m^{2}}{2n}+\ldots \bigr)
\end{equation}
the condition to be imposed to cut-off the ``logarithmic divergence'' --- meaning the coefficient in the total counting (the ``negative counted'' counted with an extra minus sign) to $\frac{1}{n}$ --- is
\begin{equation}
\sum _{\mathrm{species}\atop{\mathrm{``extra + original''}}}\pm m^{2}=0
\label{c1}
\end{equation}
Here of course the $\pm$ is $-$ for the ``negative'' and $+$ for the original fermion and extra species being counted positively. In the same notation the main cut-off being indeed a cut-off condition is
\begin{equation}
\sum _{\mathrm{species}\atop{\mathrm{``extra \, plus \, original''}}}\pm1=0.
\label{c2}
\end{equation}
With the conditions (\ref{c1}) and (\ref{c2}) the combined system will indeed function as a cut-off.
Since the proposed cut-off is just based on massive particles (counted through negatively some of them), it will be Lorentz invariant and translational invariant and particle number conserving. However, it will \underline{no more} have \underline{conformal invariance}, nor \underline{Weyl invariance}, nor chiral invariance!
With such a cut-off we should thus be able to preserve Lorentz invariance and translational invariance, but risk anomalies in Weyl and scale invariance.
That is to say that for a vacuum, which does not \underline{spontaneously} break the mentioned symmetries, we should find $\langle T_{\mu \nu}\rangle \propto g_{\mu \nu} $.
While in the uncut-off theory it looks so conformally invariant that the variation of $\Omega$ in the metric $e^{2\Omega }\eta_{\mu \nu }$ is not felt by the fermion field, with our cut-off procedure such an influence can come in. Indeed the mass terms in the Lagrangian for our extra species become $\Omega $-dependent. Indeed we have in the Weyl-transformation modified flat space-time metric $g_{\mu \nu}=e^{2\Omega }\eta_{\mu \nu }$ the massive Dirac equation Lagrangian
\begin{equation}
\sqrt{g}L_{D}=\sqrt{g}\overline{\Psi }(x)(\gamma ^{a}e^{\mu }_{a}\partial _{\mu }-m)\Psi (x)
\end{equation}
so that even after going to the tilded notation $\widetilde{\Psi} (x)=e^{\frac{1}{2}\Omega }\Psi (x)$ we have the mass term
\begin{equation}
\sqrt{g}L=\ldots +\widetilde{\Psi }^{+}\gamma ^{0}m \widetilde{\Psi } e^{\Omega }.
\label{3.5}
\end{equation}
By expanding the exponential $e^{\Omega }$ this term gives rise to ``Yukawa''-like couplings of the ``extra species'' to $\Omega $.
If we do not care for the logarithmic divergence but only go for calculating the Weyl or conformal anomaly meaning $T^{\mu}_{\ \mu}$ we may not care to fulfill (\ref{c1}) and can if we like do with only one extra species, and that one should then be negatively counted. We shall do so in the following section 4.
\section{WEYL ANOMALY FROM DIRAC SEA}\label{sec:weyl anomaly}
\subsection{On how to extract $T^{\mu }_{\> \> \mu }$}\label{subsec:how to}
Since the Weyl or equivalently the conformal anomaly is known to mean that the trace of the energy momentum tensor~\cite{9} $T^{\mu }_{\> \> \mu }$ turns out to be nonzero, in fact $-\frac{1}{48\pi }R$ where $R$ is the Ricci scalar curvature, we need a procedure for extracting this energy momentum tensor $T^{\mu \nu}$.
It is well known (Birrell--Davies) that (interpreting $T_{\mu \nu}$ as renormalized; see the footnote 2 lines above) the expression in terms of the fields of a theory including a metric $g_{\mu \nu }$ for the energy momentum tensor $T_{{\mu }\nu}$ is obtainable from the action $S$ by functional differentiation with respect to the metric
\begin{equation}
T_{\mu \nu}=\frac{\delta S_{\scriptstyle(\mathrm{matter})}}{\sqrt{g}\delta g^{\mu \nu}}
\nonumber
\end{equation}
or
\begin{equation}
T_{\mu \nu}=-\frac{\delta S_{\scriptstyle(\mathrm{matter})}}{\sqrt{g}\delta g_{\mu \nu}}
\label{4.1}
\end{equation}
If we rather than the formal expression in terms of the fields such as $\Psi $ would like the expectation value in (say) the vacuum situation with some background gravitational field --- we think of our $g_{\mu \nu}=e^{2\Omega }\eta _{\mu \nu}$ above --- we might extract this expectation value for $T_{\mu \nu}(\vec{x})$ at a certain space-time point $\vec{x}$ by logarithmically functionally differentiating the vacuum to vacuum S-matrix / transition matrix element
\begin{equation}
<T_{\mu \nu }>=\frac{\delta <\mathrm{sea}|e^{-i\int _{-\infty}^{\infty}Hdt}|\mathrm{sea}>}{\delta g^{\mu \nu }}
\Big/<\mathrm{sea}|e^{-i\int _{-\infty}^{\infty}Hdt}|\mathrm{sea}>
\end{equation}
Here the vacuum in which we are interested should be --- of course --- the one \underline{with} the Dirac Sea filled. Here it should be understood that the second quantized Hamiltonian $H$ should contain the background metric $e^{2 \Omega }g_{\mu \nu }$ (unless it ``accidentally'' drops out). This means that \underline{a priori} the vacuum could develop away from being a vacuum --- getting e.g.\ pairs produced --- due to the effect of the background metric. However, as we have seen in the action (\ref{2.20}) formally our background field $\Omega $ and thus $g_{\mu \nu}=e^{2\Omega} \eta _{\mu \nu}$ does \underline{not} influence the fermions described in the $\widetilde{\psi },\widetilde{\overline{\psi }}$ notation at all. Thus unless the cut-off might change the situation, there is no effect of the considered background metric. Thus if this holds the Dirac Sea vacuum $|\mathrm{sea}>$ will remain undisturbed by the background metric in the $\widetilde{\psi },\widetilde{\overline{\psi }}$ notation.
Let us remember though that it is only because we keep to the still \underline{conformally flat} metric $e^{2\Omega} \eta _{\mu \nu}$, that there is no effect of the background metric. Keeping to metric only being of the $e^{2\Omega} \eta _{\mu \nu}$ type we cannot extract the $T_{\mu \nu }$ proper, because we cannot vary the metric arbitrarily, but we \underline{can} extract the \underline{trace} $T^{\mu }_{\> \> \mu }=g^{\mu \nu }T_{\mu \nu}=g_{\mu \nu }T^{\mu \nu }$, since indeed
\begin{eqnarray}
T^{\mu}_{\ \mu}&=& g_{\mu \nu}T^{\mu \nu} \nonumber\\
&=& g_{\mu \nu }\frac{\delta \ln <\mathrm{sea}|e^{-i\int _{-\infty}^{\infty}Hdt}|\mathrm{sea}>}{\delta g_{\mu \nu }} \nonumber\\
&=& \frac{1}{2}\frac{\delta \ln <\mathrm{sea}|e^{-i\int _{-\infty}^{\infty}Hdt}|\mathrm{sea}>}{\delta \Omega }.
\end{eqnarray}
Indeed we have of course with $g_{\mu \nu }=e^{2 \Omega }\eta _{\mu \nu}$ that
\begin{eqnarray}
\frac{\delta \ln <\mathrm{sea}|e^{-i\int _{-\infty}^{\infty}Hdt}|\mathrm{sea}>}{\delta \Omega }
&=&\frac{\partial g_{\mu \nu } }{\partial \Omega}\Bigm|_{\vec{x}} \frac{\delta \ln <\mathrm{sea}|e^{-i\int _{-\infty}^{\infty}Hdt}|\mathrm{sea}>}{\delta g_{\mu \nu }}\nonumber\\
&=&2g_{\mu \nu }\frac{\delta \ln <\mathrm{sea}|e^{-i\int _{-\infty}^{\infty}Hdt}|\mathrm{sea}>}{\delta g_{\mu \nu }}\nonumber\\
&=&2g_{\mu \nu }T^{\mu \nu}=2T^{\mu}_{\ \mu}
\end{eqnarray}
We might thus extract the trace $T^{\mu }_{\> \> \mu }$ of the energy momentum tensor $T_{\mu \nu }$ alone from varying the background field by some (extra) Weyl transformation by say $\omega $ i.e.\
\begin{equation}
g_{\mu \nu }\to e^{2\omega }g_{\mu \nu }
\end{equation}
and looking for the variation of the $S$-matrix element from vacuum to vacuum.
Since we already wrote the formalism above for a by one Weyl transformation modified space, it may be most effective to just combine the further for $T^{\mu }_{\ \mu }$-extracting purposes introduced Weyl transformation $\omega $ with the already introduced one $\Omega $ to one combined
\begin{equation}
\Omega_{\mathrm{total}}=\Omega + \omega
\label{4.6}
\end{equation}
Weyl transformation
\begin{equation}
\eta _{\mu \nu }\to e^{2 \Omega _{\rm{total}}} \eta _{\mu \nu }.
\end{equation}
\subsection{``Second quantized formalism''}\label{subsec:second}
Since in the $\widetilde{\overline{\psi }},\widetilde{\psi }$-formulation we have effectively flat space --- only with an $S^{1}$-circle space, $R\times S^{1}$ space-time --- in spite of a non-trivial $\Omega$ or say $\Omega _{\mathrm{total}}$ in (\ref{4.6}), we can in reality in an $\Omega _{\mathrm{total}}$-independent way expand the second quantized fermion fields $\widetilde{\overline{\psi }}$ and $\widetilde{\psi }$ and their hermitean conjugate annihilation and creation operators
\begin{equation}
\widetilde{\psi }(\vec{x})=\sum _{n=-\infty}^{\infty}\widetilde{b}_{n}e^{in(x'-t)}
\label{4.8}
\end{equation}
and correspondingly the daggered second quantized fields would be expanded on a priori creation operators
\begin{eqnarray}
\widetilde{\psi }^{+}(\vec{x})&=&\sum _{n=-\infty}^{\infty}\widetilde{b}^{+}_{n}e^{-in(x'-t)}\\
\label{4.9}
\widetilde{\overline{\psi }}(\vec{x})&=&\sum _{n=-\infty}^{\infty}\overline{\widetilde{b}}^{+}_{n}e^{in(x'-t)}
\label{4.10}
\end{eqnarray}
Formulated in the complete space-time description using eq.\ (\ref{2.22}) inserted into the formulas (\ref{4.8})--(\ref{4.10}) we obtain the expansion for the $\psi $'s.
\begin{eqnarray}
\widetilde{\psi }(z)&=&\sum _{n=-\infty}^{\infty}\widetilde{b}_{n} \frac{1}{z^{n+\frac{1}{2}}}\nonumber\\
\widetilde{\overline{\psi }}(\overline{z})&=&\sum _{n=-\infty}^{\infty}\widetilde{\overline{b}}_{n} \frac{1}{\overline{z}^{n+\frac{1}{2}}}
\end{eqnarray}
\subsection{The Dirac Sea}\label{subsec:dirac sea}
If we work in the Euclidean space-time, we have for the flat case --- or if we ignore as we can $\Omega _{\mathrm{total}}$ because it does not couple ---.
A priori we have a different world each time we change the background field $\Omega _{\mathrm{total}}$ and thus in principle we should have the creation and annihilation operators $\widetilde{b}^{+}_{n}$, $\widetilde{\overline{b}}^{+}_{n}$, $\widetilde{b}_{n}$ and $\widetilde{\overline{b}}_{n}$ depend on $\Omega _{\mathrm{total}}=\Omega + \omega $ so that we should write for example $\widetilde{b}^{[\omega ]}_{n}$, $\widetilde{\overline{b}}^{[\omega ]}_{n}$, $\widetilde{b}^{+[\omega ]}_{n}$ and $\widetilde{\overline{b}}^{+[\omega ]}_{n}$.
However, since $\Omega_{\mathrm{total}}$ does \underline{not} appear in the equations of motion of the $\widetilde{\psi }$ and $\widetilde{\overline{\psi }}$-fields we may suggestively ignore such $\omega $-dependence and identify them such as
\begin{equation}
\widetilde{b}^{[\omega ]}_{n}=\widetilde{b}^{[0]}_{n}
\end{equation}
the energy operator $-\frac{d}{dt}$ while the momentum operator is $-i\frac{d}{dx^{'}}$
Thus the dispersion relation is depicted in Fig.\ref{fig:1}.
\section{OUR CUT-OFF PROCEDURE}\label{sec:cut-off}
We have seen above that formally the modification of the metric from $\eta _{\mu \nu}$ to $e^{2\Omega(x')}\eta _{\mu \nu}$ (see (\ref{2.3})) makes no change in the Hamiltonian (see (\ref{2.34})). Really this is seen (also) from the equations (\ref{2.34}), in which $H'$ has the same expression in $\widetilde{b}^{[\omega ]}_{n}$ and $\overline{\widetilde{b}}^{[\omega ]}_{n}$ as $H$ in $\widetilde{b}^{[0]}_{n}$ and $\overline{\widetilde{b}}^{[0]}_{n}$.
We can thus stress that in these (formal) expressions the modification of the metric has completely dropped out. So any dependence on the ``Weyl transformations'' on the flat space can only come in via the regularization.
In this section we shall now propose a regularization of most importantly the Dirac sea.
Indeed, a regularization is performed by adding to the system yet a particle species in addition to the fermion described by $b^{[0]}_{n}$ or $b^{[\omega ]}_{n}$ (which are actually equal to each other). This added particle species should have the quantities to result from its Dirac sea be \underline{subtracted} rather than added as for our usual fermion. So we should declare that energy momentum and particle number from the Dirac sea for this added species should be counted with an extra minus sign.
Let us immediately give the idea that using our earlier works on ``Dirac sea for Bosons''~\cite{6}~\cite{7}~\cite{8} a boson with exactly the same action and equations of motion as the fermions we start from would have this property of subtracting its contribution from collected quantities such as energy momentum and particle number, because we found out the Dirac sea for bosons should have one particle \underline{removed} (i.e.\ added $-1$ particle) from each negative energy single particle state. Thus we should imagine that our added species to cancel the contribution from the fermions could be a boson with exactly the same equation and spin etc.\ as the fermion. It would thus not obey the spin statistics theorem in general but rather remind of a ghost-particle species.
Now we do not want such a proposed ghost to cancel all the contribution from the fermion Dirac sea, but only the beyond the wanted cut-off part. We propose therefore the ``ghost particle'' --- the boson with fermionic equation of motion and ``spin'' --- to have a mass $M$ of the order of the wanted cut-off $\wedge $, i.e.\ $M \sim \wedge $. Using such a massive ``ghost'' has the advantage of letting the theory including the cut-off have the usual symmetries such as translational invariance and particle number conservation, but not Weyl invariance and not chiral symmetry.
So with this cut-off procedure we cannot get anomalies in momentum or particle number conservation, but we ``risk'' to get a Weyl anomaly as well as a chiral anomaly.
While the massless fermion is formally untouched by the modification $\Omega (x)$ of the metric $e^{2 \Omega(x)}\eta _{\mu \nu }$ the ghost-like boson to remove its high energy contribution will ``feel'' this modification via its mass $M$. Indeed the mass term should in principle be understood relative to the physical metric tensor $g_{\mu \nu}=e^{2\Omega}\eta_{\mu \nu }$.
We shall be allowed to use totally flat space-time as long as we do not cut-off and use the $\widetilde{\psi }$-fields. However, the mass term which brings the cut-off by being there for the compensating ghost-like species must physically be defined in the $\psi$ -notation.
For instance the mass term $M \overline{\psi}_{nc} \psi_{nc}$ for one of our ``negatively counted'' fields in $\psi_{nc}$-notation would be written in the $\widetilde{\psi}_{nc}$-notation as analogous to (\ref{3.5})
\begin{eqnarray}
L_{\mathrm{mass}\ {nc}_{1}}&=&\wedge \overline{\psi }_{nc}(x)\psi_{nc}(x) \nonumber\\
&=&\wedge \overline{\widetilde{\psi}}_{nc}(x)e^{-\frac{1}{2}\Omega(x)} e^{-\frac{1}{2}\Omega(x)} \widetilde{\psi}_{nc}(x)\nonumber\\
&=&\wedge \overline{\widetilde{\psi}}_{nc}(x)\widetilde{\psi}_{nc}(x)e^{-\Omega(x)}
\end{eqnarray}
by using for $\psi_{nc}$ the analogous rewriting to a tilde field $\widetilde{\psi}_{nc}$ to (\ref{2.19}). Here we used the symbol $\wedge$ for the mass in the untilded notation to remind us that this mass is indeed a cut off. In the tilde-notation we then have an (effective) mass
\begin{equation}
M(x^{\mu})=\wedge e^{-\Omega (x)}
\end{equation}
which is now space-time dependent.
It is not hard to check that this $\Omega$-dependence of the effective mass $M(x^{\mu})$ is consistent with dimensionality considerations. In fact the distance element $ds$ is given by
\begin{eqnarray}
ds^{2}&=&g_{\mu \nu}dx^{\mu}dx^{\nu} \nonumber\\
&=&e^{2\Omega}\eta_{\mu \nu }dx^{\mu}dx^{\nu}
\end{eqnarray}
so that the physical distance $ds$ is $e^{\Omega}$ times the flat distance element $ds_{\rm{FLAT}}$ given by
\begin{equation}
ds^{2}_{\rm{FLAT}}=\eta_{\mu \nu}dx^{\mu}dx^{\nu}
\end{equation}
So a mass $M$ given as say $\wedge$ in the $ds$ measuring would in the flat notation based on $ds_{\rm{FLAT}}$ look like being scaled opposite to the distance --- since $M$ has dimension of inverse distance ---
\begin{equation}
M(x^{\mu})=\wedge e^{-\Omega}
\end{equation}
Of course such an $x^{\mu}$-dependent mass $M(x^{\mu})=\wedge e^{-\Omega(x^{\mu})}$ can be interpreted as an interaction of the Fermion (really the boson-ghost) in a Yukawa-type way with the background field $\Omega(x)$ by expanding the mass term
\begin{equation}
M(x)\overline{\widetilde{\psi}}_{nc}\widetilde{\psi}_{nc}=\wedge \overline{\widetilde{\psi}}_{nc}\widetilde{\psi}_{nc}-\Omega(x)\wedge \overline{\widetilde{\psi}}_{nc}\widetilde{\psi}_{nc}+\ldots
\end{equation}
The $\Omega$-dependence only comes in via the higher order terms in this expansion, firstly of course via
\begin{equation}
-\wedge \Omega \overline{\widetilde{\psi}}_{nc}\widetilde{\psi}_{nc}
\end{equation}
and there is no interaction with the original fermion field $\widetilde{\psi}$ itself, only with this ``compensating'' ghost-field $\widetilde{\psi}_{nc}$. ($\mbox{nc}$ stands for ``negatively counted.'')
To truly cancel even logarithmic divergences we may need both positively and negatively counted massive extra particles such as $\psi_{nc}$. In Fig.~\ref{fig:1.5} a suggestive picture is given of the dispersion relations for the originally occurring particles plus the extra species invented in order to regularize the energy and momentum from the Dirac sea. These dispersion relations are just ordinary relativistic dispersion laws, only the extra ``ghosts'' are massive while the original particle is massless.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=12cm,angle=270]{NBI-Fig_1.5.eps}
\end{center}
\caption{This figure illustrates the dispersion relations for the original
fermion and the ``little series'' of three particle species introduced for
regularization purposes, two of which are counted as there being a
negative number $-1$ of particles in the Dirac sea as indicated by the
small minus signs ``$-$'' on the negative energy branches of the dispersion
relations. The four different species have their dispersion relations
denoted by respectively.}
\label{fig:1.5}
\end{figure}
\section{CALCULATION OF TWO-POINT FUNCTION FOR $\omega$, THE WEYL TRANSFORMATION FUNCTION}\label{sec:culculation}
In order to evaluate the dependence of the functional derivative (\ref{4.1}) on a further Weyl transformation $\omega$ leading to the $\Omega_{\mathrm{total}}=\Omega+\omega$ (\ref{4.6}) we need to evaluate $\ln <\mathrm{sea}|e^{-i\int _{-\infty}^{\infty}Hdt}|\mathrm{sea}>$ at least to second order in $\Omega_{\mathrm{total}}$. Now according to the discussion above --- see (\ref{2.20})--- the $\Omega_{\mathrm{total}}$ dependence comes in only via the cut-off, which means then the extra particle which in our simplified case is the negatively counted field. We shall now calculate the one-loop correction to the second order term in the background $\Omega_{\mathrm{total}}$ field. It is the idea here to do this by considering the one loop ``vacuum'' diagrams due to the $\Omega_{\mathrm{total}}$ interaction vertices.
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=8cm,angle=270]{NBI-Fig_2.eps}
\end{center}
\caption{}
\label{fig:2}
\end{figure}
\begin{eqnarray}
\text{Fig.~\ref{fig:2} a) }&\leftrightarrow& \Omega \wedge \overline{\widetilde{\psi}}_{nc}\widetilde{\psi}_{nc} \nonumber\\
\text{Fig.~\ref{fig:2} b) }&\leftrightarrow& \frac{1}{2}\Omega^{2} \wedge \overline{\widetilde{\psi}}_{nc}\widetilde{\psi}_{nc}
\end{eqnarray}
The one loop diagrams second order in $\Omega_{\mathrm{total}}$ ``vacuum'' Feynman-diagrams are
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=8cm,angle=270]{NBI-Fig_3.eps}
\end{center}
\caption{}
\label{fig:3}
\end{figure}
It is possible to argue that keeping included all the species proposed in our cut-off procedure above with the conditions (\ref{c1}) and (\ref{c2}) we can achieve convergence. In fact we see that for dimensional and Lorentz invariance reasons both diagrams give terms proportional to the mass squared multiplied by a logarithmic divergence or a finite dimensionless number. Since the logarithmic divergence will not depend on the distance between $x$ and $x'$, with our condition (\ref{c1}) we achieve convergence for the sum of these diagrams over the introduced species. I.e.\ the regularization works.
There of course should come the $\pm 1$ sign factor for the loop from the Furry theorem, which is missing for the negatively counted species, since they are effectively bosons.
As long as we do not look for the dependence on $x-x'$ or equivalently the momentum $p$ conjugate to $x-x'$ our regularization causes cancellation of both the Fig.~\ref{fig:3} a) and Fig.~\ref{fig:3} b) diagrams. Since however Fig.~\ref{fig:3} a) is only nonzero for $x-x'=0$ it means that this diagram after the summation over our species becomes totally zero.
So we only have to evaluate the diagram Fig.~\ref{fig:3} b). Now we shall remember that \\
1) this diagram only gets contributions from the \underline{extra} species (not the original fermion).\\
2)The mass(es) of the extra species are really the cut-off scale, i.e.\ very large.
Thus in the $\vec{x}$-representation this diagram only contributes when there is very short distance between the points $\vec{x}$ and $\vec{x}'$. Thus we should in principle be allowed to Taylor expand this diagram in $\vec{x}'-\vec{x}$. If we go to the Fourier transformation i.e.\ to the momentum representation we should correspondingly be allowed to take the small $\vec{p}$ approximation, only including terms proportional to the first few powers in $\vec{p}$. I.e.\ we shall assume $\vec{p}\ll m$, as of course is natural since the $m$ are of the scale of the cut-off.
The diagram becomes in $\vec{x}$-representation in the simplified form of only one negatively counted species.
\\
\\
\begin{equation}
\text{Fig.~\ref{fig:3} b) }=\Omega(\vec{x})\cdot \langle T\bigl(G(\vec{x},\vec{x}')G(\vec{x},\vec{x}')\bigr)\rangle \Omega(\vec{x}')
\end{equation}
where the $\vec{x}$-representation propagator for ``negatively counted particle'' is denoted
\begin{equation}
G(\vec{x},\vec{x}')=\int \frac{d^{2}p}{(2\pi )^{2}}\frac{i}{\not{p}-M}e^{ip(\vec{x}-\vec{x}')}.
\end{equation}
Let us give a name $K(\vec{x},\vec{x}')$ to the coefficient of the product $\frac{1}{2}\Omega(\vec{x})\Omega(\vec{x}')$. Of course from translational invariance this $K(\vec{x},\vec{x}')$ only depends on the difference $\vec{x}'-\vec{x}$ and may Fourier transform its dependence on this difference $\vec{x}'-\vec{x}$.
\begin{equation}
K(\vec{x}',\vec{x})=\int e^{i\vec{p}(\vec{x}'-\vec{x})}\widetilde{K}(\vec{p})
\frac{d^{2}\vec{p}}{(2\pi )^{2}}
\end{equation}
where then the Fourier transformed $\widetilde{K}$ of $K$ is easily seen to be given by
\begin{equation}
\widetilde{K}(\vec{p})=M^{2} \int Tr\Bigl(\frac{i}{\not{q}-M}\frac{i}{\not{p}+\not{q}-M}\Bigr)
\frac{d^{2}\vec{q}}{(2\pi )^{2}}
\end{equation}
It should be had in mind that because our in the loop encircling particle is a ``negatively counted one'' we at the end do \underline{not} have the Furry-theorem's sign corresponding to the loop being a Fermion loop but rather we should treat it as a boson loop with respect to Furry sign. I.e.\ there should be no Furry-sign.
Had we included the full set of extra species the logarithmic divergence of the integral would have cancelled out to be 0. Actually for dimensional reasons $\widetilde{K}(\vec{p}=0)$ would cancel completely to zero due to (\ref{c1}). So the only important surviving term is in fact the term in $\widetilde{K}(\vec{p})$ second order in $\vec{p}$ i.e.\ we expand
\begin{equation}
\widetilde{K}(\vec{p})=\widetilde{K}(\vec{p}=\vec{0})+B\vec{p}^{2}+\ldots
\label{6.6}
\end{equation}
and we need only to compute $B$. Here then
\begin{equation}
B=\frac{1}{2}\eta^{ij}\frac{\partial}{\partial p^{i}} \frac{\partial}{\partial p^{j}} \frac{\widetilde{K}(\vec{p})}{M^{2}}\cdot \frac{1}{d}\Bigr| _{\vec{p}=0}
\end{equation}
where $d$ is the dimension of space-time, $d=2$.
In order to perform the differentiations $\frac{\partial}{\partial p^{i}}$ with respect to the ``external'' momentum $\vec{p}$ we make use of the general rule for differentiating inverse matrices
\begin{equation}
\frac{d}{d\xi }\bigl(\underline{\underline{A}}(\xi )^{-1}\bigr)=-\underline{\underline{A}}(\xi )^{-1}
\frac{d\underline{\underline{A}}(\xi )}{d\xi }\underline{\underline{A}}(\xi )^{-1}
\end{equation}
(This is obtainable by differentiating first by the Leibniz rule the definitional equation for the inverse of a matrix $\underline{\underline{A}}(\xi )^{-1}\underline{\underline{A}}(\xi )=1$)
We then easily obtain
\begin{eqnarray}
B&=&M^{2}\int Tr\Bigl( \bigl( \frac{1}{\not{q}-M}\bigr)^{4}\Bigr)\frac{d^{2}q}{(2\pi )^{2}}\cdot \frac{2}{2\cdot d}\\
&=&\frac{1}{2}\int Tr\Bigl( \bigl( \frac{\not{q}+M}{q^{2}-M^{2}}\bigr)^{4}\Bigr)\frac{d^{2}q}{(2\pi )^{2}}
\end{eqnarray}
Wick-rotated this $B$ becomes, using $q^{\mu }_{E}=(iq^{0},q')$
\begin{equation}
B=\frac{1}{2}\int Tr\Biggl( \frac{(\not{q}_{E}+M)^{4}}{(q^{2}_{E}+M^{2})^{4}} \Biggr) \frac{d^{2}q_{E}}{(2\pi )^{2}}
\end{equation}
We now use as usual
\begin{equation}
\not{q}^{2}_{E}=-q^{2}_{E}
\end{equation}
and
\begin{equation}
Tr(\not{q}_{E})=0.
\end{equation}
and $Tr(\underline{\underline{1}})=2$ because a Dirac spinor has 2 components in $d=2$ and also
\begin{equation}
\frac{d^{2}q_{E}}{(2\pi )^{2}}=\frac{2\pi |q_{E} |d |q_{E}|}{(2\pi)^{2}}=\frac{d(|q_{E}|^{2})}{4\pi}
\end{equation}
and obtain for the coefficient divided by $M^{2}$, $B$
\begin{equation}
B=\frac{2}{2}\int _{|q_{E}|=0}^{\infty}\frac{|q_{E}|^{4}-6|q_{E}|^{2}M^{2}+M^{4}}{(q_{E}^{2}+M^{2})^{4}}
\frac{d|q_{E}|^{2}}{4\pi}.
\end{equation}
Here we used
\begin{equation}
(\not{q}_{E}+M)^{4}=\not{q}_{E}^{4}+4\not{q}^{3}_{E}M+6\not{q}^{2}_{E}M^{2}+4\not{q}_{E}M^{3}+M^{4}.
\end{equation}
Changing to the variable
\begin{equation}
\mu=q^{2}_{E}+M^{2}
\end{equation}
which should be integrated from $M^{2}$ to $\infty $, we write
\begin{equation}
|q_{E}|^{2}=\mu-M^{2}
\end{equation}
and thus we get
\begin{eqnarray}
B&=&\int _{M^{2}}^{\infty}\frac{(\mu-M^{2})^{2}-6M^{2}(\mu-M^{2})+M^{4}}{\mu^{4}}\frac{d\mu}{4\pi}\nonumber\\
&=&\int _{M^{2}}^{\infty}\frac{\mu^{2}-8M^{2}\mu+8M^{4}}{\mu^{4}}\frac{d\mu}{4\pi}
\nonumber\\
&=&\int _{M^{2}}^{\infty}\Biggl[\frac{1}{\mu^{2}}-\frac{8M^{2}}{\mu^{3}}+\frac{8M^{4}}{\mu^{4}}\Biggr]
\frac{d\mu}{4\pi}
\nonumber\\
&=&\Biggl[\frac{1}{1\cdot M^{2}}-\frac{8M^{2}}{2\cdot (M^{2})^{2}}+\frac{8M^{4}}{3\cdot (M^{2})^{3}}\Biggr] \cdot \frac{1}{4\pi}
\nonumber\\
&=&\frac{1}{M^{2}}\Bigl(1-4+\frac{8}{3}\Bigr)\cdot\frac{1}{4\pi}
\nonumber\\
&=&\frac{-1}{4\pi M^{2}}\cdot\frac{1}{3}
\nonumber\\
&=&\frac{-1}{12\pi M^{2}}
\end{eqnarray}
So the coefficient in the expansion (\ref{6.6}) to $\vec{p}^{2}$ thus is
\begin{equation}
M^{2}B=\frac{-1}{12 \pi}.
\end{equation}
Fourier transforming back to $\vec{x}'-\vec{x}$ representation we have
\begin{equation}
p^{2}\to -\partial_{\mu}\partial^{\mu}
\end{equation}
and we shall remember that the Ricci curvature scalar is
\begin{equation}
R=\partial_{\mu}\partial^{\mu}\Omega _{\mathrm{TOTAL}}.
\end{equation}
Thus a term in $\ln <\mathrm{sea}|e^{-i\int _{-\infty}^{\infty}Hdt}|\mathrm{sea}>$ of the form \\$\ln <\mathrm{sea}|e^{-i\int _{-\infty}^{\infty}Hdt}|\mathrm{sea}>=\ldots+\frac{1}{2}\Omega M^{2}Bp^{2}\Omega+\ldots$, means that differentiating with respect to $\Omega$ so as to extract $T^{\mu}_{\> \> \mu}$ would give
\begin{eqnarray}
<T^{\mu}_{\> \> \mu}>&=&M^{2}Bp^{2}\Omega \nonumber\\
&=&\frac{1}{2}M^{2}BR
\end{eqnarray}
Thus we derived
\begin{eqnarray}
T^{\mu}_{\> \> \mu}&=&\frac{1}{2}M^{2}BR\nonumber\\
&=&-\frac{1}{24\pi}R
\end{eqnarray}
This is the well-known Weyl anomaly.
\section{CONCLUSION}
We have recomputed the Weyl anomaly --- the relation of which to the conformal anomaly is described below--- in the physical picture of it being due to the Dirac sea: the energy and momentum described by the $T_{\mu \nu}$-tensor are simply the ones of the Dirac sea particles. As always you can only obtain an anomaly after having had to regularize. We have proposed a somewhat new regularization method a bit reminiscent of Pauli-Villars regularization. It consists in introducing in addition to the original particles in the theory a little series of similar formal particles. The crucial feature of some of the introduced formal species of particles is that they are declared to \underline{count negatively} in the Dirac sea. Taking it that these formally introduced and negatively counted particles behave analogously to the original particles except that they get assigned masses of the order of the cut off scale, $M \sim \wedge$, we easily see that the contributions to e.g.\ energy density or momentum density from the numerically highest energy particles in the (combined) Dirac sea get cancelled, and thus an effective cut off of these numerically high energy contributions is obtained. By a little series of formally introduced particles, counted some negatively and some positively, it is possible to cancel the divergences in the Dirac sea contribution to e.g.\ the $T_{\mu \nu}$-tensor as we discussed in the present article. Thus one achieves in this way our regularization.
\subsection{Relation to Bosons}
Taking into account our earlier article ``Dirac sea for Bosons'' in which we consider it that there is in second quantizing Bosons removed one boson from each negative energy state, analogous to Dirac's adding one for fermions, one sees a great similarity of our negatively counted formal regularization-particles and Bosons with the wrong spin. In fact our formally introduced negatively counted particles corresponding to the Fermions described by $\psi$ above must indeed be essentially Bosons just with the wrong ``spin'' or equivalently set of field components inherited from the Fermions they shall regularize, because indeed in Feynman loop integrals it would be needed to have from the negatively counted particles analogous loops with just the same large loop four momentum dependence of the integrand so as to cancel the divergence. Such a cancellation could just be achieved due to the Furry sign ---making the Boson and Fermion loops provided with opposite signs--- if we simply take the negatively counted species to be opposite statistics ``for regularization purpose introduced'' particles; that is to say as bosons if the original particle species is a Fermion, or oppositely.
In this sense our regularization scheme (proposal) has some similarity to the role of super-symmetry in removing the (in)famous quadratic divergence in the Higgs mass square thereby presenting a solution to the so called hierarchy problem. Both cancellations are due to Bosons cancelling Fermions or opposite. But there is one important difference between the SUSY cancellation in which Fermions and sfermions both obeying spin statistics theorem and our regularization method in which say a Fermion contribution is cancelled by a Boson that is a ``ghost'' in the sense that it \underline{does not obey the spin statistics theorem}.
According to our ``Dirac sea and Bosons'' work~\cite{6}~\cite{7}~\cite{8} such a violation of the spin statistics theorem would mean that our regularized theory has the possibility of negative norm states (in the Fock space) if you truly treat the to be negatively counted particles regularizing the Fermions as Bosons. But that is just as usual: a regularized theory is not satisfactory in all respects.
The Weyl symmetry anomaly we recomputed means that the trace of the energy momentum tensor $T^{\mu}_{\> \> \mu}$, which would have been expected to be zero in a truly Weyl invariant theory, turned out to be instead
\begin{equation}
T_{\> \> \mu}^{\mu}=\frac{1}{48\pi}R.
\nonumber
\end{equation}
The easy way to see that naively the $T^{\mu }_{\> \> \mu}$ should be zero if there had been Weyl invariance --- meaning invariance under varying $\Omega$ in $g_{\mu \nu}=e^{2 \Omega}\eta_{\mu \nu}$ --- is to think of the equation
\begin{eqnarray*}
T_{\mu \nu}&=&\frac{\partial W}{\partial g^{\mu \nu}}\nonumber\\
W&=&\ln <\mathrm{sea}|e^{-i\int _{-\infty}^{\infty}Hdt}|\mathrm{sea}>
\nonumber
\end{eqnarray*}
implies $T_{\> \> \mu}^{\mu}=0$ if $W$ does not depend on $\Omega$.
Let us also remark that the Weyl symmetry in which we have recomputed the anomaly is closely related to conformal invariance.
In fact if a theory in 2 dimensions is Weyl invariant, it will be conformal invariant even when a background gravity field is included. In a parametrization in which the metric tensor of the background gravity field is of the form $g_{\mu \nu}=e^{2\Omega} \eta_{\mu \nu}$ a conformal transformation of the matter fields could namely be extended to the background gravity field by varying just the scalar field $\Omega(\vec{x})$. Thus if this variation, which just is a Weyl transformation $\Omega \rightarrow \Omega + \omega$, is indeed a symmetry of the theory, then the theory will also be conformally invariant even though the space-time is not flat. Then of course if there is an anomaly in the Weyl invariance symmetry there will be a corresponding one in the conformal symmetry for the \underline{curved} space.
This should also be true in general even for flat space since even in flat space a conformal transformation being extended to the gravitational field would induce a Weyl transformation. However, the special form of the Weyl anomaly, being proportional to the curvature scalar $R$, will of course turn out to vanish in flat space. So because of this feature that the anomaly is proportional to the curvature (scalar) $R$ one becomes allowed to say that there is in \underline{flat space} no conformal anomaly.
\subsection{Outlook}
We hope that we can find other examples of applying to anomaly calculations:
a) the regularization method with the not spin statistics theorem obeying ``ghost'' particles
b) the Dirac sea as a physical picture.
Here we have in mind attempting to recompute the gravitational anomalies~\cite{9} in chiral Fermion theories (with Majorana-Weyl in $2+4 n$ dimensions.)
\section*{Acknowledgement}
The authors acknowledge Yasuhiro Sekino for discussions and comments. One of us (H.~B.~N.) thanks for hospitality to stay at the Niels Bohr Institute as emeritus professor. One of the authors (M.~N.) would like to thank the Niels Bohr Institute for hospitality extended to him during his stay there. M.~N. also expresses acknowledgement for financial support to the JSPS Grant-in-Aids for Scientific Research Nos.21540290, 23540332 and 24540293.
|
{
"timestamp": "2012-06-28T02:03:00",
"yymm": "1206",
"arxiv_id": "1206.6076",
"language": "en",
"url": "https://arxiv.org/abs/1206.6076"
}
|
\section{Introduction}
After nearly a half century of pulsar observations, we still do not know
the detailed location of the emission zones in the neutron star magnetosphere.
However the general consensus is that the radio emission arises from the `open'
field line zone above the magnetic poles at modest altitudes, from a few to a few
tens of neutron star radii. In contrast, the $\gamma$-ray
emission, as measured by {\it Fermi} \citep{psrcat}, is dominated by
high altitudes $> 0.1 R_{LC}$, where the light cylinder radius is $R_{LC}=cP/2\pi$.
Thus the emission zones and light curves for these two bands generally differ.
However, recently {\it Fermi} has detected $\gamma$-ray emission from a number
of millisecond pulsars where the entire magnetosphere is outside of
$R_{NS} \approx 0.2/P_{ms} R_{LC}$, so that radio emission must be from `high altitude' \citep{k12b}.
Further, \cite{kj07} and \cite{jw06} have found evidence
that for young energetic pulsars, the radio emission is dominated by an altitude of
$\sim 1000$ km ($\sim 100 R_{NS}$). This is $\sim 0.2 R_{LC}$ for P=100 ms, and
it is precisely such young, energetic pulsars which are $\gamma$-bright.
Thus, if one is interested in $\gamma$-emitting pulsars, one must also
consider radio emission from an appreciable fraction of the light cylinder radius.
Since the first radio observations, the high linear polarization and rapid
position angle sweep of many pulsars at cm wavelength have been used as a clue to
the geometry of the emission zone. The foundation for such study is the
\cite{rc69} `rotating vector model' (RVM), which follows the sweep
of the magnetic field line tangent of a point dipole as projected on the sky.
Of course, finite altitude radio emission violates the point source RVM assumption and
\cite{bcw91} (hereafter BCW) gave simple approximations for the effects of
relativistic aberration at small altitude. In this approximation, the polarization
position angle is
\begin{equation}\label{eq:BCW}
\psi=\arctan\left[\frac{3 r \sin(\zeta) - \sin(\alpha) \sin(\phi+r)}
{\sin(\zeta) \cos(\alpha) -\cos(\zeta) \sin(\alpha) \cos(\phi+r)}\right],
\end{equation}
where the inclination angle between rotation axis and magnetic axis is $\alpha$, the viewing angle is
$\zeta$, and the pulse phase is $\phi$.
The RVM formula is recovered in the limit as the scaled emission height,
$r\equiv r_{em}/R_{LC}$, goes to zero.
Here the principal effect is
a lag in the phase of the maximum rate of the polarization sweep ${\rm d}\psi/{\rm d}\phi|_{max}$
of $\Delta\phi \approx 2r$ from the phase of the magnetic axis.
If the absolute position angle of the magnetic axis on the plane of the
sky is known (e.g.\ from the position angle of the spin axis), \cite{ha01}
show that the observed polarization gives a second height estimate,
$\Delta\psi \approx \frac{10}{3} r\cos(\alpha)$, where
\begin{equation}\label{eq:BCWPhi}
\psi=\arctan\left[\frac{-\sin(\alpha)\sin(\phi-2r)}
{\sin(\zeta) \cos(\alpha) -\cos(\zeta) \sin(\alpha) \cos(\phi-2r)}\right]+\Delta\psi
\end{equation}
\citep{d08}. In practice it is generally
unclear how to measure the magnetic axis polarization angles; most authors treat $\Delta\psi$
as a nuisance parameter.
Of course, both formulae presume knowledge of the phase of closest approach
of the magnetic axis $\phi=0$. The phase of the radio pulse peak is often used,
but these pulses can have complex, multi-component morphology. Further, the
special relativistic effects shift the intensity peak forward, giving a net
observable lag of the polarization sweep from the intensity peak
of $\Delta \phi \approx 4r$.
The shifts have been clarified and extended to include the effects of field
line sweepback by \cite{dh04}, and \cite{d08}.
Nevertheless, observers generally fit to the zero altitude (RVM) limits of
the formula to constrain $\alpha$ and $\zeta$ and, when possible,
estimate the shift of ${\rm d} \psi/{\rm d}\phi|_{max}$ to constrain the altitude,
using the linear (BCW) scaling. While this works adequately for many non-recycled
pulsars, relatively high altitude emission is inferred for young energetic objects. For
millisecond pulsars the basic RVM model often does not fit well.
Thus, recent strong interest in $\gamma$-ray emitting pulsars draws our
attention to objects where the radio emission may extend to 0.1$R_{LC}$ or higher,
where the standard RVM treatment is suspect. We seek here to quantify this breakdown:
if one applies an RVM/BCW fit and obtains estimates of the magnetic inclination
angle $\alpha_f$, viewing angle $\zeta_f$, and emission height $r_f$, for what ranges
of these parameters are these fits `valid', i.e. when do the fit values and uncertainty
ranges include (at some prescribed probability) the real value
$r_r$? We develop this analysis as a guide to observers wishing to
interpret pulsar polarization data and as an indication of situations where detailed
fits to numerical models (e.g.\ Parent et al. 2011) are required. In addition,
we suggest analytic corrections to allow useful $r_f$
estimates from simple RVM fits to extend to somewhat higher altitude.
\section{Simulation Model Assumptions}
Our approach is to use a specific 3-D magnetosphere model with plausible
radio emission zones, to `fit' the resulting light curve and polarization
sweep with the point dipole RVM formula and to parametrize the errors.
For simplicity the field lines are given by the basic swept back (retarded) dipole
popular in models of high altitude $\gamma$-ray emission \citep{rw10} and
we assume that the radiating particle bunches follow the magnetic field lines.
In the spirit of the RVM model, we make a simple geometric construction,
projecting the field line tangent at the emission point in the lab frame
onto the plane of the sky and assume that the radio emission is polarized
parallel to (or perpendicular to) this vector. We do not attempt here to
superpose multiple emission heights or to compute intrinsic polarization
fractions. Nor do we include other physical effects such as possible
cross-field drift of the emitting charge bunches, current-induced departure
from the vacuum structure for energetic pulsars \citep{spit06} or higher-order
multipole/offset dipole effects that may be important in the small
magnetospheres of millisecond pulsars \citep{hm11}. While our simple
construction ignores these possible effects, we do capture the dominant
effect of dipole sweep-back and our computed polarization sweeps pass
smoothly to the RVM model curves at low altitude; the other physical effects
likely only dominate very close to the light cylinder.
We assume here that the radio emission comes from a single altitude,
within the open zone. We then must define the open zone shape and the illumination
across it. Of course, there is a formal cap shape for the vacuum retarded
dipole solution, where the locus of field lines tangent to
the light cylinder trace to a cap on the surface with opening angle
$\theta_R (\phi_{cap})$ varying with azimuth $\phi_{cap}$ around the magnetic axis.
Alternatively, it is common to assume a simple circular cap, with
surface angle $\theta_C (\phi_{cap})=$ constant. To roughly match the open zone
beam sizes at an emission height of 0.1$R_{LC}$ we chose a surface cap angle of $\theta_C=2^\circ$
for a neutron star of $R_{NS} = 10^{-3}R_{LC}$, i.e. a $\sim 0.2$\,s pulsar.
For simplicity and to follow the BCW picture, we illuminate the open
zone with a simple Gaussian profile
\begin{equation}\label{eq:Ipulse}
I \propto e^{-(\theta_{cap}/\theta_0)^2},\qquad {\rm with }\qquad \theta_0=2^\circ/{\sqrt{\ln 5}}
\end{equation}
so that the intensity falls by 5$\times$ at the `edge' of the simple circular cap.
The angles are measured at the star surface, although the corresponding radio flux
may be emitted at high altitude.
We note that there is some evidence that a conal intensity distribution
with a patchy illumination may be more typical of many pulsars \citep{lm88,kj07}.
To generate a model polarization sweep we select a magnetic inclination, $\alpha_r$,
and emission height, $r_{r}$. We then project the swept-back field lines
at this altitude to the plane of sky and record the results on a 2D sky map.
Horizontal cuts across this map at a given viewing angle, $\zeta_r$, give the
polarization angle sweep, $\psi(\phi)$. We assign `measurement' errors to each value inversely proportional
to the pulse flux at its phase. We assume that the observer's integration achieves
a uniform signal-to-noise at pulse maximum, so that the polarization measurement error
there is $1^\circ$. For pulsars observed far from the magnetic axis at large
$|\beta|\equiv |\zeta-\alpha|$ this implies longer integration.
As the pulse flux falls toward the edge of the open zone the polarization
angle uncertainties increase.
\subsection{Estimating $\phi=0$}
Use of the simple Gaussian illumination with the pulse phase at the
intensity peak (the projected phase of closest approach to the magnetic axis)
corresponds to the BCW assumptions. Except for very high altitude emission, where
field lines overlap in the sky map and pulse caustics can occur, this gives
a simple prescription from which $\phi=0$ may be estimated via the BCW shift.
However, conal emission concentrated to the cap edge significantly complicates
the determination of pulse phase. One effect is the variable sweep-back at the
leading and trailing edge of the cap. Another is the particular shape of the open
zone boundary. We illustrate these effects by marking a `peak phase', the
midpoint of the projected open zone boundary, both for a simple circular cap and
for the more detailed retarded dipole cap. Figure ~\ref{fig:Plotcap} displays
the peak phase shifts for these different definitions.
\begin{figure}[h]
\begin{center}
\includegraphics[width=.46\textwidth]{imageCaps.eps}
\caption{Pulse phase estimates for $\alpha=45^\circ$, $\phi(\zeta) = 0$
for two different altitudes, $r=0.1R_{LC}$ and $r=0.3R_{LC}$.
$G_C$: Peak phase from maximum of simple Gaussian intensity weighting.
$C_C$: Peak phase from center of cap edges (circular cap).
$C_R$: Peak phase from center of cap edges (retarded dipole cap).}
\label{fig:Plotcap}
\end{center}
\end{figure}
Not unexpectedly, Figure ~\ref{fig:Plotcap} shows that the pulse phase is
more sensitive to the details of the open zone geometry for a conal emission zone.
The offsets shown there illustrate the effect of the retarded potential field
line flaring at high altitude. To this should be added the uncertainties associated
with identifying the magnetic axis phase in the presence of patchy conal emission and
non-dipole field structure (for millisecond pulsars). Nevertheless, as we shall show, a substantial
fraction of the phase offset is insensitive to the choice of cap center, and can
be corrected.
\subsection{`Fitting' an RVM curve}
The retarded dipole field structure increasingly departs from the point dipole
as the scaled emission height $r=r_{em}/R_{LC}$ approaches unity. Thus
if one fits polarization data for a low altitude emitter with the RVM model, the
fit parameters $\alpha_f,\zeta_f$, and $r_f = \Delta \phi_f/4$ at the $\chi^{2}$ minimum will be
good approximations to the real values $(\alpha_r, \zeta_r, r_r)$. For modest $r_r$
the RVM fit will absorb the sweep shape departures, (correctably) biasing the
parameter estimates, while retaining reasonable $\chi^{2}$. At large altitude, the
$\chi^{2}$ will be poor, the parameters will be uncorrectably far from the true
values and a fit to a detailed numerical model will be required. The key question
is how, with realistic errors $\sigma_{PA}$, the unabsorbed distortion grows.
We use our estimated $\sigma_{PA}$ to construct a `$\chi^2$' weighted
departure of the RVM model from the detailed retarded field simulation. This is the
weighted systematic error caused by the inability of the RVM model to absorb
the detailed shape of the retarded field curve. In a real observation, additional
statistical measurement errors would increase `$\chi^2$' above our model value,
especially for small $r$. Any unmodeled physical effects should additionally increase
the value of `$\chi^2$' above $\sim 1$/(degree of freedom) at the minimum.
Observers typically adopt the increase $\Delta \chi^2=\chi^2-\chi_{min}^2$ to
estimate the confidence intervals on the fit parameters. We are free to do the
same here, since our prescription's weights appropriately reflect where the model
parameters are most sensitive to the data values. We have confirmed this by
fits to a series of Monte Carlo simulations of polarization angle data with added statistical errors,
showing that $\Delta \chi^2$ follows the usual distribution for the appropriate
numbers of degrees of freedom.
\section{Correcting for Bias in the RVM Height Estimates}
\begin{figure}[h]
\begin{center}
\includegraphics[width=.46\textwidth]{totDirFitPhi.eps}
\caption{
Altitude limits for effective RVM fits. Each panel shows the distribution
of simulated model fits (color bar) in offset from the true altitude as a function
of fit altitude $r_f/R_{LC}$. The dark band shows the systematic bias in the
fit offset. The four panels are for different assumptions about the cap
illumination and method of estimating the true phase of $\phi=0$.
$E_B$: Perfect knowledge of the location of magnetic axis in phase, without the use of an intensity model.
$G_C$: Simple Gaussian intensity peak, $\phi=0$ inferred from the altitude dependent shift of $I_{max}$.
$C_C$: Peak intensity assigned to the center of a double pulse from edges of an open zone circular cap footpoints.
$C_R$: Peak intensity assigned to the center of a double pulse from edges of an open zone above the detailed retarded dipole cap.
The green curve shows our estimate of the bias, Equation (\ref{eq:genCor}).
}
\label{fig:totDirFitPhi}
\end{center}
\end{figure}
Our principal goal here is to test the utility of standard RVM fits and
to provide a prescription to allow these fits wider applicability for pulsars
with high altitude emission. To do this we compare the RVM fit estimate $r_f$ with
the simulated value $r_r$. Since the mapping is not simple, statements about
ranges of validity are perforce statistical. This makes our answers mildly
sensitive to the distribution in the underlying pulsar population. Here
we assume that our parent
pulsar population has isotropically distributed inclination and viewing angles,
i.e.\ Prob$(\alpha)\propto \sin(\alpha)$, Prob$(\zeta) \propto \sin(\zeta)$
while the altitude is distributed uniformly on $0\le r_r\le 0.3$. Note that we
only {\it observe} a usable polarization sweep if a pulsar produces a minimum
number of phase bins (here $\Delta \phi_{obs}>0.1$). In turn,
this means that our observable pulsar population is biased toward modest
$|\beta| = |\alpha-\zeta|$.
We generate a set of pulsar models and apply the RVM fits. This delivers
a set of observables $\{\alpha_f, \zeta_f, r_f; \sigma_{\alpha_f}, \sigma_{\zeta_f}, \sigma_{r_f}\}$
where the fit values are determined by $\chi^2$ minimum and the error ranges
are estimated from the curvature of the $\chi^2$ surface. An observer presented
with this set of measurements must infer the original pulsar properties.
Focusing here on the height measurement, we test the systematic bias in
the RVM estimate. For best comparison with the BCW assumptions, we work with
the height determined from the phase lag measured from the peak of a Gaussian pulse
centered on the magnetic axis. In Figure~\ref{fig:totDirFitPhi}, the color scale
represents the number of pulsars in the simulated population
at a given altitude derived from fitting RVM versus the difference between fitted and
real altitude. Figure ~\ref{fig:totDirFitPhi} shows that $r_f$ increasingly underestimates
$r_r$ at increasing altitude. A simple formula to provide improved height estimates
$r_f'$ from RVM fits is then
\begin{equation}\label{eq:genCor}
r_f' = r_f+ 0.2 (r_f/0.5)^2,
\end{equation}
as plotted in Figure~\ref{fig:totDirFitPhi}. The line fits best to the darkest
ridge (the ridge that contains a majority of simulated pulsars)
for the models using the maximum of a simple Gaussian intensity peak ($G_C$)
and the center of the cap edges for a circular cap ($C_C$). For the case using
the center of the cap edge for a retarded dipole cap ($C_R$), the line
slightly under-predicts the
darkest ridge and does not capture the behavior of the
second ridge which is caused by the shift of the central line from the cap notch
(see Figure ~\ref{fig:Plotcap}). We can (unrealistically) assume that
we know where in phase the magnetic axis
is located and calculate the altitude from the shift in polarization directly. Inaccuracies
in altitude are then from Equation (\ref{eq:BCWPhi}) alone.
With the assumption of perfect knowledge of
the magnetic axis ($E_B$), we see the departure from the BCW formulation occurs
at lower altitudes. Apparently, the estimate $\Delta \phi=-2r_f$
for the peak intensity shift preserves good accuracy to higher altitude
than the $\Delta \phi=+2r_f$ shift of the polarization
sweep, especially when the intensity arises from a circular cap.
\begin{figure}[h]
\begin{center}
\includegraphics[width=.4\textwidth]{SmapGenCorrected8V2.eps}
\caption{
Maximum useful $r_f$ altitude (color bar) in the ($\alpha_f$, $\zeta_f$) plane for four
assumptions about the pulse intensity beam shape (see text for our criterion
for good fit accuracy). Left: BCW estimates before
correction. Right: corrected heights using Equation (\ref{eq:genCor}).
Green contours indicate the area where at least fifty simulated pulsars
were fit to an ($\alpha_f$,$\zeta_f$) pair.
}
\label{fig:SmapCorrected8}
\end{center}
\end{figure}
In practice, the height offset depends on the geometrical angles
$(\alpha, \zeta)$. In addition, the height estimate is affected by uncertainty
in estimating the polarization sweep lag, i.e. in determining the phase of the pulse
(or equivalently the phase of the magnetic axis). These effects are shown in
Figure ~\ref{fig:SmapCorrected8}. For each panel we show, as a function of the estimated angles
$(\alpha_f, \zeta_f)$, the maximum height (color bar) at which the estimated altitude is
accurate. For the estimate to be useful, we require that $r_r$ lies in
the range $r_f\pm\sigma_{r_f}$ for a large fraction (99\%) of the observable
model pulsars. At small altitude this is always true. At large altitude
the distortion due to the retarded field structure causes increasing
departure from the BCW estimate. Once too small a fraction of models produce
useful fits, the BCW approximation `breaks down'. Lowering the required fraction
does not drastically change the results seen in Figure ~\ref{fig:SmapCorrected8}, since
the fraction of failing models increases very rapidly with fit altitude. Also shown
is a green contour that marks the area where the bins contain at least
fifty simulated pulsars. Uncolored bins are where the BCW approximation is
inaccurate at the lowest altitude. The contours are independent
of the intensity model (the contours are the same for each model) because
the $\alpha_f$, $\zeta_f$ bin
depends only on the polarization sweep which is calculated independently
of the intensity model.
A strong dependence between the break-down altitude and $\alpha_f$ and $\zeta_f$ exists as can
be seen in Figure ~\ref{fig:SmapCorrected8}. This is not due to any
difficulty in finding the phase center but arises from the nontrivial relation between
the shift in the maximum sweep of the polarization and the geometry angles.
In Figure ~\ref{fig:SmapCorrected8}, we can see that for $\alpha_f$ and $\zeta_f$
further from $90^\circ$, BCW tends to break down at a lower altitude.
The shift in the maximum sweep of the polarization angles for these values is smaller
than predicted by the BCW model. Since the BCW model has no dependence on
$\alpha$ and $\zeta$, it is not surprising that the break-down altitude has
a dependence on these angles.
The panels show the maximum useful height for
four different estimates of the phase lag: (top-to-bottom) perfect knowledge of
the magnetic axis, a Gaussian pulse peaked on the magnetic axis field line, a
`conal' pulse from a field lines with a circular cap on the star and a `conal' pulse with
a cap determined by the detailed open zone of the retarded vacuum solution.
Notice that most observed pulsars have modest $|\beta|=|\zeta-\alpha|$, and
are close to the diagonal.
The right panels show the equivalent maximum useful height when the
estimate has been corrected according to Equation (\ref{eq:genCor}). While the uncorrected
estimates for the Gaussian pulse peak model are useful
only to an average (over $\alpha_f$ and $\zeta_f$) height of $\overline{r_f}=0.11R_{LC}$, the corrected
estimates are usable to higher altitudes (reaching $r_f'\sim 0.3$ for the commonly
observed case of near-orthogonal rotators) with an average of $\overline{r_f}=0.22R_{LC}$.
Again, corrected
RVM estimates from a model radio pulse do better than estimates
assuming perfect knowledge of the magnetic axis, since the retarded potential
phase shifts are a fractionally larger contribution to the phase offset in this
case.
\begin{figure}[h]
\begin{center}
\includegraphics[width=.4\textwidth]{zetaResmagMin.eps}
\caption{
Altitude limits for effective RVM fits. Each panel shows the distribution
of simulated model fits (color bar) in offset from the true altitude as a function
of fit altitude $r_f/R_{LC}$ for different $\zeta_f$, assuming a simple Gaussian intensity peak,
$\phi=0$ inferred from the altitude dependent shift of $I_{max}$.
For these plots, the simulated pulsar population has been
summed over $\alpha_f$ to emphasize the dominance of $\zeta_f$ in the correlation.
The green curve shows our estimate of the bias with dependence on
$\zeta_f$, Equation (\ref{eq:zetaCor}).
}
\label{fig:zetaResmagMin}
\end{center}
\end{figure}
We can improve the heuristic correction function by including the
viewing geometry. The bulk of the sensitivity is evidently due to $\zeta_f$,
as illustrated by the relatively small dispersion of the $r_f$ error for
individual $\zeta_f$ slices (see Figure ~\ref{fig:zetaResmagMin} for
a Gaussian central pulse).
Accordingly, we have made an alternate corrected height estimate
\begin{equation}\label{eq:zetaCor}
r_f' = r_f+ [0.3 + 0.7 |\cos(\zeta)| ] (r_f/0.5)^{3}
\end{equation}
where $r_f=\Delta \phi/4$, as usual. This greatly extends the range for which
a simple RVM height estimate can be used (Figure ~\ref{fig:SmapCorrected4}). This estimate, based
on a Gaussian radio pulse emitted along the swept back magnetic axis, is
in general the best function for an observer to use with no other information. It provides significant
improvement in the emission height accuracy for the circular cone pulse profiles.
\begin{figure}[h]
\begin{center}
\includegraphics[width=.4\textwidth]{SmapCorrected4V2.eps}
\caption{Maximum altitude for accurate height estimates (color bar) in the
($\alpha_f$, $\zeta_f$) plane, after applying Equation (\ref{eq:zetaCor})
(see text for our criterion for good fit accuracy).
Note that the improvement is best for a circular (Gaussian or conal) cap.
Green contours indicate the area where at least fifty simulated pulsars
were fit to an ($\alpha_f$,$\zeta_f$) pair.
}
\label{fig:SmapCorrected4}
\end{center}
\end{figure}
Of course if one has reason to believe that a particular pulse
profile shape is more accurate, a different correction function may be
preferred. For example if one had a double pulse arising from the open zone edges ($C_R$) and
had high confidence that this pulse filled the retarded vacuum dipole open
zone, one would correct by
\begin{equation}\label{eq:zetaCor2}
r_f' = r_f+ [0.3 +2 |\cos(\zeta)|^{2} ] (r_f/0.5)^{3}.
\end{equation}
This formulation raises the average over $\alpha_f$ and $\zeta_f$ of
the maximum useful height from $\overline{r_f}=0.05R_{LC}$ with no correction to
$\overline{r_f}=0.15R_{LC}$.
In general, we recommend that when an observer fits an RVM model to pulsar
data, obtaining viewing angle and polarization sweep lag measurements,
they correct their height estimate using Equation (\ref{eq:zetaCor}). This is
particularly useful whenever the RVM fit appears statistically adequate,
but the resulting phase lag suggests a significant emission height.
The change to the estimated height will be small for $r_f < 0.2$,
but the accuracy of the resulting estimate will be greatly increased.
Of course, whenever $\chi^2$/DoF $\gg1$ at the fit minimum, it is a sign
that the model is inadequate. In many cases, this will be due to
unmodeled orthogonal mode jumps and intervening scattering \citep{k09},
higher order multipoles, etc. However, for large altitudes and
multi-altitude emission the effects of sweep back and the formation
of caustics (which dominate $\gamma$-ray light curves) become dominant.
The observer should be aware that large $\chi^2$ at the fit minimum
can signal such effects and, when the inferred altitude is large, consider
fitting the data to numerical models of 3-D pulsar magnetospheres.
\section{Height calculation from shift in $\psi$}
We can alternatively estimate $r_f$ and errors using the shift in $\psi$ \citep{ha01},
\begin{equation}\label{eq:HA}
\Delta\psi \approx \frac{10}{3} r \cos(\alpha) \left[\frac{3}{8}+\frac{5}{8} \cos(\zeta-\alpha)\right]
- \frac{47}{18} r \sin(\alpha) \sin(\zeta-\alpha)
\end{equation}
or, in the small $|\beta|=|\zeta-\alpha|$ limit, $\Delta\psi\approx \frac{10}{3} r \cos(\alpha)$.
As before, we compute the residual, $r_r-r_f$, as a function of $\alpha_f$, $\zeta_f$,
and $r_f$. To estimate an emission height from the polarization shift in $\phi$,
one needs an estimate for $\phi=0$, e.g. from a pulse peak intensity model; no such intensity
model is needed if we have a measurable shift in $\psi$.
The increase with $r_f$ are shown in Figure ~\ref{fig:totDirFitPsy},
where the left panel uses the small $\beta$ limit while the right uses the full formula.
As for the $\Delta\phi$ estimate, the errors increase with $r_f$. However here, even
when the full equation (\ref{eq:HA}) is used, the corrections show a substantial spread.
In fact the uncorrected formula proves accurate ($|r_f-r_r|$ within $\sigma_{r_f}$ 99\% of the
time) only for $\overline{r_f} < 0.08$ (where $\overline{r_f}$ is again
the average over $\alpha_f$ and $\zeta_f$)
and for $\zeta_f <60^\circ$ or $\zeta_f >120^\circ$.
For near-orthogonal rotators the estimate is unreliable at the lowest altitudes.
\begin{figure}[h]
\begin{center}
\includegraphics[width=.46\textwidth]{totDirFitPsy.eps}
\caption{
Altitude limits for effective RVM fits using the shift in $\psi$. Each panel shows the distribution
of simulated model fits (color bar) in offset from the true altitude as a function
of fit altitude $r_f/R_{LC}$. The dark band shows the systematic bias in the
fit offset.
The residual is more scattered when the altitude is measured from
the shift in $\psi$ instead of the shift in $\phi$ of the polarization
sweep.
On the left is the residual using the small $|\beta|$ limit.
The green curve shows our estimate of the bias from the
shift in $\psi$, Equation (\ref{eq:DPsi1}).
}
\label{fig:totDirFitPsy}
\end{center}
\end{figure}
A heuristic correction to the $\Delta \psi$ estimate for Equation (\ref{eq:HA}) can be made
for $\zeta_f <60^\circ$ or $\zeta_f >120^\circ$
\begin{equation}\label{eq:DPsi1}
r_f' = r_f+ 0.4 (r_f/0.5)^{2}
\end{equation}
which allows accurate estimates to $\overline{r_f}=0.12R_{LC}$. Including the $\zeta$ dependence,
\begin{equation}\label{eq:DPsi2}
r_f' = r_f+ [0.2 +0.1 |\cos(\zeta)|^{2}] (r_f/0.5)^{2}
\end{equation}
raises the useful range to $\overline{r_f}=0.18R_{LC}$. Considering that the correction
for the common orthogonal rotator case is especially poor, and that it is often
difficult to infer the intrinsic $\psi_0$,
height estimates from the phase shift remain much more useful.
\section{Pulse Width Dependence on Emission Height}\label{sec:rW}
Since the field lines flare in the open zone, the full phase width $W$ of the
observed radio pulse can also be checked against the expected radio emission
altitude. The standard prescription assumes a circular cap and static dipole field
lines to infer a minimum height
\begin{equation}\label{eq:rW}
r_{W}=\frac{4}{9}\arccos^{2}\left[\cos(\alpha)\cos(\zeta)+\sin(\alpha)\sin(\zeta)\cos\left(\frac{W}{2}\right)\right].
\end{equation}
In Figure ~\ref{fig:totDirFitW} we show that the retarded dipole field flares
{\it more} than predicted by this simple formula and hence the minimum height
in Equation (\ref{eq:rW}) is an {\it over}-estimate. Thus, in general, lower
altitudes are consistent with a given observed pulse width than suggested by this
formula. Moreover, we expect that the general effect of currents in the magnetosphere
will be to increase the foot-point angles of the open zone.
This further increases the allowed
$W$ at a given height.
\begin{figure}[h]
\begin{center}
\includegraphics[width=.46\textwidth]{totDirFitW.eps}
\caption{
Altitude limits for effective pulse width. Each panel shows the distribution
of simulated model fits (color bar) in offset from the true altitude as a function
of fit altitude $r_f/R_{LC}$.
$W_C$: Circular cap. $W_R$: Retarded dipole cap.
The simple static dipole formula overestimates the altitude needed
to accommodate a given pulse width $W$ in the open zone. The error depends
on the viewing geometry $\alpha$ and $\zeta$, and bifurcates for the `notched'
cap of the formal retarded potential open zone.
}
\label{fig:totDirFitW}
\end{center}
\end{figure}
In general, larger widths are still most easily accommodated at large $r$ or
small $\alpha$, but sweep-back and magnetospheric currents substantially weaken
the minimum altitude constraints from the commonly used Equation (\ref{eq:rW}).
Given the large sensitivity to the details of the open zone volume and the presently
unknown effect of magnetospheric currents, it is not worth developing corrections to
this formula.
\section{Conclusions; Examples from Literature}
We conclude by examining a few RVM/BCW estimates of
emission height present in the literature.
In \cite{r11}, $\Delta \phi$ estimates were used to suggest
large emission heights for two young energetic pulsars. For J0538+2817 the
shift gives $r_f=0.15R_{LC}$, but RVM fitting only weakly constrains $\zeta$.
Applying Equation (\ref{eq:genCor}), we would infer $r^\prime_f = 0.17 R_{LC}$,
a small, but significant increase which makes it easier to accommodate the
large observed pulse width. Similarly PSR J1740+1000 gives $r_f=0.12R_{LC}$.
Here we constrain $\zeta=80^{\circ}$ to $130^{\circ}$,
so that the corrected fit altitude (Equation \ref{eq:genCor} or Equation \ref{eq:zetaCor}) is
$r^\prime_f=0.13R_{LC}$, again a small but statistically significant increase.
For millisecond pulsars the effects can be larger. For example, \cite{k12}
find that RVM fitting can be usefully applied to several
recycled pulsars. PSR J1502-6752 (P=26.7\,ms) is a mildly recycled
pulsar for which the phase lag implies $r_f=0.2R_{LC}$. With no significant $\zeta$
constraints, we apply Equation (\ref{eq:genCor}) to infer a 16\% altitude increase
to $r^\prime_f=0.23R_{LC}$. Similarly PSR J1708-3506 ($P=4.5\,$ms) has a phase shift
implying $0.19R_{LC}$, which we correct to $0.21-0.22 R_{LC}$. For this pulsar,
a naive application of the pulse width formula (\ref{eq:rW}) gives altitudes of
$r_{W_{10}}\simeq0.65R_{LC}$ (10\% peak width).
However, the increased $r^\prime_f$ and decreased pulse width height
from sweepback effects (Figure \ref{fig:totDirFitW}), along with additional current-induced
open zone growth, make it likely that the pulse width can be accommodated at the corrected height.
\cite{k12} also report an RVM/BCW height $r_f= 0.44R_{LC}$ for
the P=2.7\,ms pulsar J1811-2404, along with well constrained viewing angles of
$\alpha=89.7^{\circ}$ and $\beta=21^{\circ}$. While our full analysis does
not cover this altitude, as Figure \ref{fig:SmapCorrected4} shows the corrections
of Equation (\ref{eq:zetaCor}) give a very high accuracy for orthogonal
rotators viewed near $90^\circ$. Note in Figure \ref{fig:zetaResmagMin}, bottom
panel, that the correction function is nearly linear, so extrapolation to
somewhat higher values may be justified. Naively applying this correction we get
$r_f'=0.81R_{LC}$. We certainly cannot trust this value in detail since plasma
effects and other perturbations may be relevant at such altitudes. However, the
correction is certainly large and it brings the expected height up to an altitude
where the very wide observed radio pulse, and the likely detection of emission
from both open zones, can be easily accommodated.
Certainly simple RVM/BCW fitting is inadequate for this pulsar and one
should use a detailed model for the high altitude field geometry.
\bigskip
\bigskip
Our exercise extends the range of utility of RVM-fit polarization
sweeps for inferring the altitudes of radio pulsar emission. For fit altitudes
less than $r_f = 0.25 R_{LC}$ the corrections are not large, but they are
systematic and, for high S/N data localizing the phase of maximum polarization
sweep, they can be highly significant. We thus believe it is worth applying our recommended
correction. For larger altitudes the corrections grow rapidly, but we caution
that as one approaches the light cylinder, current-induced distortions
should increase and, except for near-orthogonal rotators, one would expect the RVM
formulae to provide a poor fit in any case. Fitting to detailed numerical models
is then preferred. In all cases the dominant residual uncertainty is likely in
locating the phase of the radio pulse.
We also checked the use of absolute polarization axis position angles
and pulse width to constrain the emission height. Here the difficulties in
establishing the unperturbed $\psi_0$ and the expected distortions of the open zone
boundaries by currents, etc. make the estimates much less useful. Nevertheless,
we have shown that the effects of sweepback do go in the direction of reconciling
observed pulsar properties to a consistent emission height: larger heights are
inferred by a given $\Delta \psi$ shift and larger pulse widths can be accommodated
at a given height. We feel, however, that the corrections are less quantitative
than for $\Delta \phi$.
In sum, since observers will continue to apply analytic RVM fits
to pulsar polarization data, by applying our recommended
correction (Equation \ref{eq:zetaCor}), these results
can continue to give accurate height estimates to $\le 0.3 R_{LC}$. At higher altitudes,
which will be common for millisecond pulsars, a fit to more detailed numerical
models is likely warranted.
\acknowledgements
This work was supported in part by NASA grants NNX10AP65G and NAS5-00147.
|
{
"timestamp": "2012-06-28T02:00:53",
"yymm": "1206",
"arxiv_id": "1206.6131",
"language": "en",
"url": "https://arxiv.org/abs/1206.6131"
}
|
\section{Introduction}
Gluon scattering amplitudes have been known to be dual to Wilson loops along lightlike polygons.
While first shown at strong coupling (Ref.~\onlinecite{AM07}) through the famous AdS/CFT duality
introduced in Ref.~\onlinecite{Mal98}, this result has later been verified at weak coupling
(Refs.~\onlinecite{DKS08,BHT08}). For a review, consult Ref.~\onlinecite{AR08}.
Recently, a similar duality (at weak coupling) between the full superamplitude of
$\mN=4$ super Yang-Mills theory and a supersymmetric extension of the Wilson loop
has been claimed, of which two variants appeared almost simultaneously.
The first approach by Mason and Skinner (Ref.~\onlinecite{MS10}) originates in momentum twistor
space and translates into the integral over a superconnection in spacetime.
The second is due to Caron-Huot (Ref.~\onlinecite{CH11}) and attaches
to lightlike polygons certain edge and vertex operators, whose
shape is determined by supersymmetry constraints.
At the classical level, both approaches
are identical only on-shell (Ref.~\onlinecite{BKS12}).
Belitsky showed in Ref.~\onlinecite{Bel12} that the conjectured duality with superamplitudes
indeed holds, however only upon subtracting an anomalous contribution
from the super Wilson loop.
The operators in the Caron-Huot approach depend on momentum supertwistors.
While the edge operators are well known, explicit formulas for the vertex operators
have been available in the literature only up to fourth order in the Graßmann expansion.
The aim of this article is to fill this gap. We state explicit formulas for the
vertex operators up to maximum order. This is achieved by deriving a recursion
formula out of the supersymmetry constraints.
To fix notation, we let $W_n$ denote the super Wilson loop and
$\mE_i$ and $\mV_{i,i+1}$ the edge and vertex operators, respectively,
which depend on the (odd) momentum supertwistors $\eta_i^A$ and $\eta_{i+1}^A$.
At zeroth order, the ordinary Wilson loop should be recovered, thus leading to the ansatz
$\mE_i=p_i\cdot A+\mO(\eta)$ and $\mV_{i,i+1}=1+\mO(\eta)$.
The supersymmetry constraints are such that $\mQ_A^{\alpha}W_n=0$ is to vanish,
where the $\mQ_A^{\alpha}=q_A^{\alpha}+c_0\sum_i\lambda_i^{\alpha}\dd{\eta_i^A}$ act
on the fields as well as the momentum supertwistors. This is achieved if the
edges and vertices transform by an infinitesimal super gauge transformation
\begin{subequations}
\begin{align}
\label{eqnSusyE}
\mQ_A^{\alpha}\mE_i&=\frac{1}{g}\left(\partial_t-ig\scal[[]{\mE_i}{\cdot}\right)X^{\alpha}_{iA}\\
\label{eqnSusyV}
\mQ_A^{\alpha}\mV_{i,i+1}&=iX^{\alpha}_{i+1\,A}\mV_{i,i+1}-i\mV_{i,i+1}X^{\alpha}_{iA}
\end{align}
\end{subequations}
Here, and in the following, we adopt the conventions of Ref.~\onlinecite{BDKM04}.
\section{Edge Operators}
The edge operators are computed as sketched in Ref.~\onlinecite{CH11}. One finds the following
solution of (\ref{eqnSusyE}), making use of the Euler-Lagrange equations.
\begin{align*}
\mE_i&=\frac{1}{2}\lambda_{i\beta}\tlambda_{i\dbeta}A^{\beta\dbeta}
+\frac{i}{c_0}\tlambda_{i\dbeta}\tpsi_A^{\dbeta}\eta_i^A
-\frac{i\sqrt{2}}{2c_0^2}\frac{\tlambda_{i\dbeta}\lambda_{(i-1)\gamma}D^{\dbeta\gamma}\ophi_{AB}}
{\scal{i}{i-1}}\eta_i^A\eta_i^B\\
&\qquad+\frac{1}{3c_0^3}\varepsilon_{ABCD}
\frac{\lambda_{(i-1)\xi}\tlambda_{i\dbeta}\lambda_{(i-1)\gamma}D^{\dbeta\gamma}
\psi^{\xi A}}{\scal{i}{i-1}^2}\eta_i^B\eta_i^C\eta_i^D\\
&\qquad+\frac{i}{24c_0^4}\varepsilon_{ABCD}
\frac{\lambda_{(i-1)\gamma}\lambda_{(i-1)\xi}\tlambda_{i\dbeta}\lambda_{(i-1)\beta}D^{\dbeta\beta}F^{\gamma\xi}}
{\scal{i}{i-1}^3}\eta_i^A\eta_i^B\eta_i^C\eta_i^D
\end{align*}
with
\begin{align*}
X^{\alpha}_{iA}:=\frac{g\lambda_{i-1}^{\alpha}}{c_0\scal{i}{i-1}}
&\left(-2i\sqrt{2}\,\ophi_{AB}\eta_i^B
+\varepsilon_{ABCD}\frac{2\lambda_{(i-1)\gamma}\psi^{\gamma B}}{c_0\scal{i}{i-1}}\eta_i^C\eta_i^D\right.\\
&\qquad\left.+\frac{i}{3c_0^2}\varepsilon_{ABCD}\frac{\lambda_{(i-1)\gamma}\lambda_{(i-1)\beta}F^{\gamma\beta}}{\scal{i}{i-1}^2}
\eta_i^B\eta_i^C\eta_i^D\right)
\end{align*}
\section{A Recursion Formula}
We expand
\begin{align}
\label{eqnVCoefficients}
\mV_{i,i+1}&=\sum_{k=0}^4\sum_{l=0}^4
V_{A_1\ldots A_k\;B_1\ldots B_l}\,\eta_i^{A_1}\ldots\eta_i^{A_k}\eta_{i+1}^{B_1}\ldots\eta_{i+1}^{B_l}
\end{align}
and, similarly, denote the coefficients of $X_{iA}^{\alpha}$ by
\begin{align*}
X_{iA}^{\alpha}=X_{iA}^{\alpha(1)}+X_{iA}^{\alpha(2)}+X_{iA}^{\alpha(3)}
=X_{iAA_1}^{\alpha(1)}\eta_i^{A_1}+X_{iAA_1A_2}^{\alpha(2)}\eta_i^{A_1}\eta_i^{A_2}
+X_{iAA_1A_2A_3}^{\alpha(3)}\eta_i^{A_1}\eta_i^{A_2}\eta_i^{A_3}
\end{align*}
Let $V_0=1$ (i.e. $\mV_{i,i+1}=1+\mO(\eta)$) and require that $\mV_{i\,i+1}$ only
depends on the generators $\eta_i$ and $\eta_{i+1}$. Then
(\ref{eqnSusyV}) with $X^{\alpha}_{iA}$ as above has the following
unique solution: All coefficients $V_{B_1\ldots B_d}$ with $d>0$ (i.e.\ all ``pure $\eta_{i+1}$-terms'') vanish, and the remaining coefficients are successively determined by the following recursion formula.
\begin{align*}
&V_{A\,A_1\ldots A_k\,B_1\ldots B_l}\\
&\qquad=\frac{(-1)^{d+1}\lambda_{(i+1)\alpha}}{(k+1)c_0\scal{i+1}{i}}\left(-q_A^{\alpha}(V_{A_1\ldots A_k\,B_1\ldots B_l})
+iX_{(i+1)AB_l}^{\alpha(1)}V_{A_1\ldots A_k\,B_1\ldots B_{l-1}}\right.\\
&\qquad\qquad\qquad\left.+iX_{(i+1)AB_{l-1}B_l}^{\alpha(2)}V_{A_1\ldots A_k\,B_1\ldots B_{l-2}}
+iX_{(i+1)AB_{l-2}B_{l-1}B_l}^{\alpha(3)}V_{A_1\ldots A_k\,B_1\ldots B_{l-3}}\right.\\
&\qquad\qquad\qquad\left.-i(-1)^lV_{A_1\ldots A_{k-1}\,B_1\ldots B_l}X_{iAA_k}^{\alpha(1)}
-i(-1)^dV_{A_1\ldots A_{k-2}\,B_1\ldots B_l}X_{iAA_{k-1}A_k}^{\alpha(2)}\right.\\
&\qquad\qquad\qquad\left.-i(-1)^lV_{A_1\ldots A_{k-3}\,B_1\ldots B_l}X_{iAA_{k-2}A_{k-1}A_k}^{\alpha(3)}\right)
\end{align*}
where $d=k+l$.
\begin{proof}
For calculations, it is easier to work with an expansion where the
generators $\eta_i$ and $\eta_{i+1}$ can stand in any order:
\begin{align*}
\mV_{i,i+1}=\sum_{d=0}^8C_{B_1\ldots B_d}^{j_1\ldots j_d}\eta_{j_1}^{B_1}\ldots\eta_{j_d}^{B_d}\;,\quad
V_{A_1\ldots A_k\;B_1\ldots B_l}
=\left(\arr{c}{k+l\\k}\right)C^i_{A_1}{}^{\ldots}_{\ldots}{}^i_{A_k}{}^{i+1}_{B_1}{}_{\ldots}^{\ldots}{}^{i+1}_{B_l}
\end{align*}
with $j_i\in\{i,i+1\}$.
Now, applying from the left a fixed $\dd{\eta_k^A}$ in the $C$-expansion
kills the corresponding $\eta$ terms which can occur at every position, thus giving a
symmetry factor of $d$ and a sign such that
\begin{align*}
\mQ^{\alpha}_A(\mV_{i,i+1})
&=\sum_{d=0}^8\left(q_A^{\alpha}(C_{B_1\ldots B_d}^{j_1\ldots j_d})
+c_0(d+1)\,(-1)^{\abs{C_{AB_1\ldots B_d}^{kj_1\ldots j_d}}}
\lambda_k^{\alpha}\,C_{AB_1\ldots B_d}^{kj_1\ldots j_d}\right)\eta_{j_1}^{B_1}\ldots\eta_{j_d}^{B_d}
\end{align*}
(\ref{eqnSusyV}) is thus equivalent to the recursion formula
\begin{align*}
&c_0(d+1)\,(-1)^{\abs{C_{AB_1\ldots B_d}^{kj_1\ldots j_d}}}
\lambda_k^{\alpha}\,C_{AB_1\ldots B_d}^{kj_1\ldots j_d}\eta_{j_1}^{B_1}\ldots\eta_{j_d}^{B_d}\\
&\qquad=-q_A^{\alpha}(C_{B_1\ldots B_d}^{j_1\ldots j_d})\eta_{j_1}^{B_1}\ldots\eta_{j_d}^{B_d}
+i\sum_{k+l=d}({X^{\alpha}_{i+1\,A}}|_{\eta^k}{\mV_{i,i+1}}|_{\eta^l}-{\mV_{i,i+1}}|_{\eta^k}{X^{\alpha}_{iA}}|_{\eta^l})
\end{align*}
By induction, one shows that the coefficients are of parity $\abs{C_{B_1\ldots B_d}^{j_1\ldots j_d}}\equiv_2d$.
Also by induction, we see that all coefficients $C_{B_1\ldots B_d}^{i+1\ldots i+1}$ vanish:
In the recursion formula so far established, we consider the case $j_1=\ldots=j_d=i+1$ and multiply both sides with $\lambda_{i\alpha}$.
Then only the left hand side with $k=i+1$ remains and
\begin{align*}
C_{AB_1\ldots B_d}^{i+1,i+1\ldots i+1}\eta_{i+1}^{B_1}\ldots\eta_{i+1}^{B_d}
=\frac{(-1)^{d+1}\lambda_{i\alpha}}{\scal{i}{i+1}c_0(d+1)}
\left(-q_A^{\alpha}(C_{B_1\ldots B_d}^{i+1\ldots i+1})\eta_{i+1}^{B_1}\ldots\eta_{i+1}^{B_d}\right)
\end{align*}
since $\lambda_{i\alpha}X^{\alpha}_{i+1\,A}=0$ and $X^{\alpha}_{i\,A}=\mO(\eta_i)$.
For $d=0$, the right hand side $\sim q_A^{\alpha}(1)=0$ vanishes and thus $C_B^{i+1}=0$.
Take this as induction basis and assume that $C_{B_1\ldots B_d}^{i+1\ldots i+1}=0$. The same
recursion formula then implies that $C_{AB_1\ldots B_d}^{i+1,i+1\ldots i+1}=0$.
Now, by multiplying both sides of the recursion formula with $\lambda_{(i+1)\alpha}$, only the left hand side
with $k=i$ remains, and we obtain
\begin{align*}
&C_{AB_1\ldots B_d}^{ij_1\ldots j_d}\eta_{j_1}^{B_1}\ldots\eta_{j_d}^{B_d}\\
&\qquad=\frac{(-1)^{d+1}\lambda_{(i+1)\alpha}}{\scal{i+1}{i}c_0(d+1)}
\left(-q_A^{\alpha}(C_{B_1\ldots B_d}^{j_1\ldots j_d})\eta_{j_1}^{B_1}\ldots\eta_{j_d}^{B_d}
+(iX^{\alpha}_{i+1\,A}\mV_{i,i+1}-i\mV_{i,i+1}X^{\alpha}_{iA})|_{\eta^d}\right)
\end{align*}
Writing the second term on the right hand side in the $C$-expansion and then translating everything back
to the original expansion (\ref{eqnVCoefficients}) using
\begin{align*}
C_{AB_1\ldots B_d}^{ij_1\ldots j_d}\eta_{j_1}^{B_1}\ldots\eta_{j_d}^{B_d}|_{\eta_i^k\eta_{i+1}^l}
=\frac{k+1}{d+1}V_{A\,A_1\ldots A_k\,B_1\ldots B_l}\eta_i^{A_1}\ldots\eta_i^{A_k}\,\eta_{i+1}^{B_1}\ldots\eta_{i+1}^{B_l}
\end{align*}
the statement is finally obtained.
\end{proof}
\section{Vertex Operators}
By the recursion formula of the previous section, the coefficients of the vertex
operators in the expansion (\ref{eqnVCoefficients}) can be explicitly calculated.
Up to order three, the result reads
\begin{align*}
\mV_{i,i+1}&=1-\frac{\sqrt{2}\,gi_{\pm}}{c_0^2i_-i_+}\ophi_{A_1A_2}\,\eta_i^{A_1}\eta_i^{A_2}
+\frac{2\sqrt{2}\,g}{c_0^2i_+}\ophi_{A_1B_1}\,\eta_i^{A_1}\eta_{i+1}^{B_1}\\
&\qquad+\frac{2ig}{3c_0^3}\frac{i_{\pm}\left(-i_-\lambda_{(i+1)\gamma}+i_+\lambda_{(i-1)\gamma}\right)\psi^{\gamma C}}
{i_-^2i_+^2}\varepsilon_{A_1A_2A_3C}\,\eta_i^{A_1}\eta_i^{A_2}\eta_i^{A_3}\\
&\qquad+\frac{2ig\lambda_{(i+1)\gamma}\psi^{\gamma C}}{c_0^3i_+^2}\varepsilon_{A_1A_2B_1C}\,\eta_i^{A_1}\eta_i^{A_2}\eta_{i+1}^{B_1}
-\frac{2ig\lambda_{i\gamma}\psi^{\gamma C}}{c_0^3i_+^2}\varepsilon_{A_1B_1B_2C}\,\eta_i^{A_1}\eta_{i+1}^{B_1}\eta_{i+1}^{B_2}\\
&\qquad+\mO(\eta^4)
\end{align*}
with $i_-:=\scal{i}{i-1}$, $i_+:=\scal{i+1}{i}$ and $i_{\pm}:=\scal{i+1}{i-1}$.
\subsubsection*{Fourth Order}
The (non-vanishing) fourth order coefficients (\ref{eqnVCoefficients}) of $\mV_{i,i+1}$
are as follows.
\begin{align*}
V_{A_1A_2A_3A_4}
&=\left(\frac{gi_{\pm}\left(i_-^2\lambda_{(i+1)\beta}\lambda_{(i+1)\gamma}-i_-i_+\lambda_{(i-1)\beta}\lambda_{(i+1)\gamma}
+i_+^2\lambda_{(i-1)\beta}\lambda_{(i-1)\gamma}\right)F^{\beta\gamma}}{12c_0^4i_-^3i_+^3}\right.\\
&\qquad+\left.\frac{g^2i_{\pm}^2\ophi_{CD}\phi^{CD}}{12c_0^4i_-^2i_+^2}\right)\varepsilon_{A_1A_2A_3A_4}\\
V_{A_1A_2A_3B_1}
&=-\frac{g\lambda_{(i+1)\beta}\lambda_{(i+1)\gamma}F^{\beta\gamma}}{3c_0^4i_+^3}\varepsilon_{A_1A_2A_3B_1}
-\frac{4g^2i_{\pm}}{c_0^4i_-i_+^2}\ophi_{A_1B_1}\ophi_{A_2A_3}\\
V_{A_1A_2B_1B_2}
&=\frac{g\lambda_{i\beta}\lambda_{(i+1)\gamma}F^{\beta\gamma}}{2c_0^4i_+^3}\varepsilon_{A_1A_2B_1B_2}
-\frac{g^2}{c_0^4i_+^2}\scal[[]{\ophi_{A_1A_2}}{\ophi_{B_1B_2}}
-\frac{4g^2}{c_0^4i_+^2}\ophi_{A_1B_1}\ophi_{A_2B_2}\\
V_{A_1B_1B_2B_3}
&=-\frac{g\lambda_{i\beta}\lambda_{i\gamma}F^{\beta\gamma}}{3c_0^4i_+^3}\varepsilon_{A_1B_1B_2B_3}
\end{align*}
\subsubsection*{Fifth Order}
\begin{align*}
V_{A_1A_2A_3A_4B_1}
&=\frac{i\sqrt{2}\,g^2i_{\pm}}{3c_0^5i_-^2i_+^3}
\left(4(i_-\lambda_{(i+1)\gamma}-i_+\lambda_{(i-1)\gamma})
\varepsilon_{A_2A_3A_4C}\ophi_{A_1B_1}\psi^{\gamma C}\right.\\
&\qquad\qquad\qquad\left.
-6i_-\lambda_{(i+1)\gamma}\psi^{\gamma C}\varepsilon_{A_1A_2B_1C}\ophi_{A_3A_4}\right)\\
V_{A_1A_2A_3B_1B_2}
&=\frac{i\sqrt{2}\,g^2\lambda_{(i+1)\beta}}{3c_0^5i_+^3}\left(\varepsilon_{A_2A_3B_1B_2}\scal[[]{\ophi_{A_1C}}{\psi^{\beta C}}
+\varepsilon_{A_1A_2A_3C}\scal[[]{\ophi_{B_1B_2}}{\psi^{\beta C}}\right.\\
&\qquad\qquad\qquad\qquad-\varepsilon_{A_1B_1B_2C}\scal[[]{\ophi_{A_2A_3}}{\psi^{\beta C}}
-4\varepsilon_{A_1A_2B_1C}\psi^{\beta C}\ophi_{A_3B_2}\\
&\qquad\qquad\qquad\qquad\left.-8\varepsilon_{A_2A_3B_1C}\ophi_{A_1B_2}\psi^{\beta C}\right)\\
&\qquad+\frac{2i\sqrt{2}\,g^2i_{\pm}\lambda_{i\gamma}}{c_0^5i_-i_+^3}\varepsilon_{A_1B_1B_2C}\psi^{\gamma C}\ophi_{A_2A_3}\\
V_{A_1A_2B_1B_2B_3}
&=\frac{2i\sqrt{2}\,g^2\lambda_{i\gamma}}{3c_0^5i_+^3}
\left(-\scal[[]{\ophi_{A_1C}}{\psi^{\gamma C}}\varepsilon_{A_2B_1B_2B_3}
+3\scal[[]{\ophi_{A_1B_1}}{\psi^{\gamma C}}_+\varepsilon_{A_2B_2B_3C}\right)
\end{align*}
where $\scal[[]{X}{Y}_+:=XY+YX$ denotes the anticommutator.
\subsubsection*{Sixth Order}
\begin{align*}
V_{A_1A_2A_3A_4B_1B_2}
&=-\frac{\sqrt{2}g^2\lambda_{(i+1)\alpha}\lambda_{(i+1)\beta}}{24c_0^6i_+^4}
\varepsilon_{A_1A_2A_3A_4}\scal[[]{\ophi_{B_1B_2}}{F^{\beta\alpha}}\\
&\qquad+\frac{\sqrt{2}g^2\lambda_{(i+1)\alpha}\lambda_{(i+1)\beta}}{6c_0^6i_+^4}
\varepsilon_{A_1A_2A_3B_1}\left(F^{\beta\alpha}\ophi_{A_4B_2}+3\ophi_{A_4B_2}F^{\beta\alpha}\right)\\
&\qquad-\frac{\sqrt{2}g^2i_{\pm}}{2c_0^6i_-i_+^4}\lambda_{i\beta}\lambda_{(i+1)\gamma}F^{\beta\gamma}\ophi_{A_1A_4}\varepsilon_{A_2A_3B_1B_2}\\
&\qquad+\frac{\sqrt{2}\,g^3i_{\pm}}{2c_0^6i_-i_+^3}
\left(2\scal[[]{\ophi_{A_1A_2}}{\ophi_{B_1B_2}}\ophi_{A_3A_4}+8\ophi_{A_2B_1}\ophi_{A_3B_2}\ophi_{A_1A_4}\right)\\
&\qquad+\frac{g^2}{3c_0^6i_-^2i_+^4}\left(\varepsilon_{A_2A_3A_4C}\varepsilon_{A_1B_1B_2D}
(i_-^2\lambda_{(i+1)\gamma}\lambda_{(i+1)\delta})\right.\\
&\qquad\qquad\qquad+\varepsilon_{A_1B_1B_2C}\varepsilon_{A_2A_3A_4D}
(i_-^2\lambda_{(i+1)\gamma}\lambda_{(i+1)\delta}+4i_-i_{\pm}\lambda_{i\gamma}\lambda_{(i+1)\delta}\\
&\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad-4i_+i_{\pm}\lambda_{i\gamma}\lambda_{(i-1)\delta})\\
&\qquad\qquad\qquad+\left.\varepsilon_{A_2A_3B_1C}\varepsilon_{A_1A_4B_2D}
(6i_-^2\lambda_{(i+1)\gamma}\lambda_{(i+1)\delta})\right)\psi^{\gamma C}\psi^{\delta D}
\end{align*}
and
\begin{align*}
V_{A_1A_2A_3B_1B_2B_3}
&=\frac{\sqrt{2}\,g^2\lambda_{i\gamma}\lambda_{(i+1)\alpha}}{9c_0^6i_+^4}
\left(\scal[[]{\ophi_{A_1A_2}}{F^{\gamma\alpha}}\varepsilon_{A_3B_1B_2B_3}\right.\\
&\qquad\qquad\qquad\left.+3\scal[[]{\ophi_{A_2B_1}}{F^{\gamma\alpha}}_+\varepsilon_{A_3B_2B_3A_1}
+3\ophi_{A_1B_3}F^{\gamma\alpha}\varepsilon_{A_2A_3B_1B_2}\right)\\
&\qquad+\frac{\sqrt{2}\,g^2i_{\pm}\lambda_{i\gamma}\lambda_{i\beta}F^{\gamma\beta}\ophi_{A_1A_2}}{3c_0^6i_-i_+^4}\varepsilon_{A_3B_1B_2B_3}\\
&\qquad+\frac{2\sqrt{2}\,g^3}{18c_0^6i_+^3}\left(\scal[[]{\ophi_{A_2C}}{\scal[[]{\ophi_{A_1D}}{\ophi_{EF}}}\varepsilon_{CDEF}\varepsilon_{A_3B_1B_2B_3}\right.\\
&\qquad\qquad\qquad+6\scal[[]{\ophi_{A_2B_1}}{\scal[[]{\ophi_{A_1A_3}}{\phi_{B_2B_3}}}_+\\
&\qquad\qquad\qquad\left.-6\ophi_{A_1B_3}\scal[[]{\ophi_{A_2A_3}}{\ophi_{B_1B_2}}
-24\ophi_{A_1B_3}\ophi_{A_2B_1}\ophi_{A_3B_2}\right)\\
&\qquad+\frac{4g^2\lambda_{i\gamma}\lambda_{(i+1)\alpha}}{9c_0^6i_+^4}
\left(-\varepsilon_{A_1A_2CD}\varepsilon_{A_3B_1B_2B_3}\scal[[]{\psi^{\alpha D}}{\psi^{\gamma C}}_+\right.\\
&\qquad\qquad\qquad+3\varepsilon_{A_3B_2B_3C}\varepsilon_{A_1A_2B_1D}\scal[[]{\psi^{\alpha D}}{\psi^{\gamma C}}\\
&\qquad\qquad\qquad\left.-3\varepsilon_{A_1B_2B_3C}\varepsilon_{A_2A_3B_1D}\psi^{\gamma C}\psi^{\alpha D}\right)\\
V_{A_1A_2B_1B_2B_3B_4}
&=\frac{\sqrt{2}\,g^2\lambda_{i\gamma}\lambda_{i\beta}\scal[[]{F^{\gamma\beta}}{\ophi_{A_1B_1}}_+\varepsilon_{A_2B_2B_3B_4}}{3c_0^6i_+^4}\\
&\qquad+\frac{2g^2\lambda_{i\gamma}\lambda_{i\delta}\psi^{\gamma C}\psi^{\delta D}}{c_0^6i_+^4}\varepsilon_{A_1B_3B_4C}\varepsilon_{A_2B_1B_2D}
\end{align*}
\subsubsection*{Seventh Order}
\begin{align*}
V_{A_1A_2A_3B_1B_2B_3B_4}
&=\frac{2ig^2\lambda_{i\gamma}\lambda_{i\beta}\lambda_{(i+1)\epsilon}}{9c_0^7i_+^5}
\varepsilon_{A_1A_2B_1C}\varepsilon_{A_3B_2B_3B_4}\left(2F^{\gamma\beta}\psi^{\epsilon C}+\psi^{\epsilon C}F^{\gamma\beta}\right)\\
&\qquad-\frac{ig^2\lambda_{i\epsilon}\lambda_{i\beta}\lambda_{(i+1)\gamma}}{3c_0^7i_+^5}
\varepsilon_{A_1B_1B_2C}\varepsilon_{A_2A_3B_3B_4}\left(F^{\gamma\beta}\psi^{\epsilon C}+2\psi^{\epsilon C}F^{\gamma\beta}\right)\\
&\qquad+\frac{8ig^3\lambda_{i\gamma}\scal[[]{\scal[[]{\ophi_{A_1C}}{\psi^{\gamma C}}}{\ophi_{A_2B_1}}_+\varepsilon_{A_3B_2B_3B_4}}{9c_0^7i_+^4}\\
&\qquad+\frac{2ig^3\lambda_{i\gamma}}{3c_0^7i_+^4}\varepsilon_{A_3B_3B_4D}
\scal[[]{\scal[[]{\ophi_{A_1A_2}}{\ophi_{B_1B_2}}}{\psi^{\gamma D}}_+\\
&\qquad+\frac{8ig^3\lambda_{i\gamma}}{9c_0^7i_+^4}\ophi_{A_1B_4}
\left(\scal[[]{\ophi_{A_2C}}{\psi^{\gamma C}}\varepsilon_{A_3B_1B_2B_3}
-3\scal[[]{\ophi_{A_2B_1}}{\psi^{\gamma C}}_+\varepsilon_{A_3B_2B_3C}\right)\\
&\qquad+\frac{2ig^3\lambda_{i\gamma}}{3c_0^7i_+^4}\varepsilon_{A_1B_3B_4C}\psi^{\gamma C}\scal[[]{\ophi_{A_2A_3}}{\ophi_{B_1B_2}}\\
&\qquad+\frac{8ig^3\lambda_{i\gamma}}{3c_0^7i_+^4}\varepsilon_{A_1B_3B_4C}\psi^{\gamma C}\ophi_{A_2B_1}\ophi_{A_3B_2}
\end{align*}
and
\begin{align*}
V_{A_1A_2A_3A_4B_1B_2B_3}&=V_{A_1A_2A_3A_4B_1B_2B_3}|_{\phi\phi\psi}+V_{A_1A_2A_3A_4B_1B_2B_3}|_{F\psi}
\end{align*}
with
\begin{align*}
&V_{A_1A_2A_3A_4B_1B_2B_3}|_{\phi\phi\psi}\\
&\qquad=\frac{ig^3\lambda_{(i+1)\beta}}{9c_0^7i_+^4}
\left(4\varepsilon_{A_4B_1B_2B_3}\scal[[]{\ophi_{A_1A_2}}{\scal[[]{\ophi_{A_3C}}{\psi^{\beta C}}}-2\varepsilon_{A_3A_4B_2B_3}\scal[[]{\ophi_{A_1B_1}}{\scal[[]{\ophi_{A_2C}}{\psi^{\beta C}}}_+\right.\\
&\qquad\qquad\qquad\qquad
-\varepsilon_{A_4B_1B_2B_3}\scal[[]{\psi^{\beta C}}{\scal[[]{\ophi_{A_1A_2}}{\ophi_{A_3C}}}
-2\varepsilon_{A_4B_1B_2B_3}\scal[[]{\ophi_{A_3C}}{\scal[[]{\psi^{\beta C}}{\ophi_{A_1A_2}}}\\
&\qquad\qquad\qquad\qquad
+3\varepsilon_{A_1A_2A_4C}\scal[[]{\ophi_{A_3B_1}}{\scal[[]{\psi^{\beta C}}{\phi_{B_2B_3}}}_++3\varepsilon_{A_1B_2B_3C}\scal[[]{\ophi_{A_3B_1}}{\scal[[]{\ophi_{A_2A_4}}{\psi^{\beta C}}}_+\\
&\qquad\qquad\qquad\qquad-\varepsilon_{A_4B_1B_2B_3}\scal[[]{\psi^{\beta C}}{\scal[[]{\ophi_{A_1A_2}}{\ophi_{A_3C}}}
-9\varepsilon_{A_2A_3B_1C}\scal[[]{\psi^{\beta C}}{\scal[[]{\ophi_{A_1A_4}}{\ophi_{B_2B_3}}}_+\\
&\qquad\qquad\qquad\qquad-12\varepsilon_{A_1A_2B_3C}\psi^{\beta C}\ophi_{A_3B_1}\ophi_{A_4B_2}\\
&\qquad\qquad\qquad\qquad+\ophi_{A_1B_3}\left(6\varepsilon_{A_2A_3A_4C}\scal[[]{\psi^{\beta C}}{\ophi_{B_1B_2}}
+6\varepsilon_{A_2B_1B_2C}\scal[[]{\ophi_{A_3A_4}}{\psi^{\beta C}}\right.\\
&\qquad\qquad\qquad\qquad\qquad\qquad+24\varepsilon_{A_2A_3B_1C}\scal[[]{\psi^{\beta C}}{\ophi_{A_4B_2}}_++12\varepsilon_{A_3A_4B_1C}\ophi_{A_2B_2}\psi^{\beta C}\\
&\qquad\qquad\qquad\qquad\qquad\qquad\left.\left.-5\varepsilon_{A_3A_4B_1B_2}\scal[[]{\ophi_{A_2C}}{\psi^{\beta C}}\right)\right)\\
&\qquad\qquad+\frac{4ig^3i_{\pm}\lambda_{i\gamma}}{3c_0^7i_-i_+^4}
\left(\varepsilon_{A_4B_1B_2B_3}\scal[[]{\ophi_{A_1C}}{\psi^{\gamma C}}\ophi_{A_2A_3}
-3\varepsilon_{A_2B_1B_2C}\scal[[]{\ophi_{A_1B_3}}{\psi^{\gamma C}}_+\ophi_{A_3A_4}\right)
\end{align*}
and
\begin{align*}
&V_{A_1A_2A_3A_4B_1B_2B_3}|_{F\psi}\\
&\qquad=\frac{ig^2\lambda_{(i+1)\alpha}\lambda_{i\gamma}\lambda_{(i+1)\beta}}{18c_0^7i_+^5}\left(9\varepsilon_{A_1A_2B_1C}\varepsilon_{A_3A_4B_2B_3}\scal[[]{\psi^{\beta C}}{F^{\gamma\alpha}}_++6\varepsilon_{A_2B_2B_3C}\varepsilon_{A_3A_4B_1A_1}\psi^{\gamma C}F^{\beta\alpha}\right.\\
&\qquad\qquad\qquad\qquad\left.+\varepsilon_{A_1A_2A_3C}\varepsilon_{A_4B_1B_2B_3}\scal[[]{F^{\beta\alpha}}{\psi^{\gamma C}}
+3\varepsilon_{A_4B_2B_3C}\varepsilon_{A_2A_3B_1A_1}\scal[[]{F^{\beta\alpha}}{\psi^{\gamma C}}_+\right)\\
&\qquad\qquad+\frac{2ig^2i_{\pm}\lambda_{(i+1)\delta}\lambda_{i\gamma}\lambda_{i\beta}}{9c_0^7i_-i_+^5}\varepsilon_{A_1A_2A_3C}\varepsilon_{A_4B_1B_2B_3}F^{\gamma\beta}\psi^{\delta C}\\
&\qquad\qquad+\frac{2ig^2i_{\pm}\lambda_{i\beta}\lambda_{i\gamma}\lambda_{(i-1)\delta}}{9c_0^7i_-^2i_+^4}
\varepsilon_{A_1B_1B_2B_3}\varepsilon_{A_2A_3A_4C}F^{\beta\gamma}\psi^{\delta C}
\end{align*}
\subsubsection*{Eighth Order}
\begin{align*}
V_{A_1A_2A_3A_4B_1B_2B_3B_4}&=V_{A_1A_2A_3A_4B_1B_2B_3B_4}|_{\phi^4}+V_{A_1A_2A_3A_4B_1B_2B_3B_4}|_{F\phi\phi}\\
&\qquad+V_{A_1A_2A_3A_4B_1B_2B_3B_4}|_{\phi\psi\psi}+V_{A_1A_2A_3A_4B_1B_2B_3B_4}|_{FF}
\end{align*}
with
\begin{align*}
V_{A_1A_2A_3A_4B_1B_2B_3B_4}|_{\phi^4}
&=\frac{g^4}{18c_0^8i_+^4}
\left(-2\varepsilon_{DEFG}\varepsilon_{A_4B_2B_3B_4}\scal[[]{\scal[[]{\ophi_{A_2D}}{\scal[[]{\ophi_{A_1E}}{\ophi_{FG}}}_-}{\ophi_{A_3B_1}}_+\right.\\
&\qquad\qquad\qquad+3\scal[[]{\scal[[]{\ophi_{A_2A_3}}{\ophi_{B_1B_2}}}{\scal[[]{\ophi_{A_1A_4}}{\ophi_{B_3B_4}}}_+\\
&\qquad\qquad\qquad-2\ophi_{A_2B_4}\scal[[]{\ophi_{A_3D}}{\scal[[]{\ophi_{A_1E}}{\ophi_{FG}}}\varepsilon_{A_4B_1B_2B_3}\varepsilon_{DEFG}\\
&\qquad\qquad\qquad-12\ophi_{A_2B_4}\scal[[]{\ophi_{A_3B_1}}{\scal[[]{\ophi_{A_1A_4}}{\ophi_{B_2B_3}}}_+\\
&\qquad\qquad\qquad+3\scal[[]{\ophi_{A_1A_2}}{\ophi_{B_3B_4}}\scal[[]{\ophi_{A_3A_4}}{\ophi_{B_1B_2}}\\
&\qquad\qquad\qquad+12\scal[[]{\ophi_{A_1A_2}}{\ophi_{B_3B_4}}\ophi_{A_3B_1}\ophi_{A_4B_2}\\
&\qquad\qquad\qquad+2\ophi_{A_1B_4}\scal[[]{\ophi_{A_3C}}{\scal[[]{\ophi_{A_2D}}{\ophi_{EF}}}\varepsilon_{CDEF}\varepsilon_{A_4B_1B_2B_3}\\
&\qquad\qquad\qquad+12\ophi_{A_1B_4}\scal[[]{\ophi_{A_3B_1}}{\scal[[]{\ophi_{A_2A_4}}{\phi_{B_2B_3}}}_+\\
&\qquad\qquad\qquad-12\ophi_{A_1B_4}\ophi_{A_2B_3}\scal[[]{\ophi_{A_3A_4}}{\ophi_{B_1B_2}}\\
&\qquad\qquad\qquad\left.-48\ophi_{A_1B_4}\ophi_{A_2B_3}\ophi_{A_3B_1}\ophi_{A_4B_2}\right)
\end{align*}
and
\begin{align*}
&V_{A_1A_2A_3A_4B_1B_2B_3B_4}|_{F\phi\phi}\\
&\qquad=\frac{g^3\lambda_{i\beta}\lambda_{(i+1)\gamma}}{36c_0^8i_+^5}
\left(-9\varepsilon_{A_3A_4B_3B_4}\scal[[]{F^{\gamma\beta}}{\scal[[]{\ophi_{A_1A_2}}{\ophi_{B_1B_2}}}_+\right.\\
&\qquad\qquad\qquad\qquad+4\varepsilon_{A_4B_2B_3B_4}\scal[[]{\scal[[]{\ophi_{A_2A_1}}{F^{\gamma\beta}}_-}{\ophi_{A_3B_1}}_+\\
&\qquad\qquad\qquad\qquad+8\ophi_{A_1B_4}\left(\scal[[]{\ophi_{A_2A_3}}{F^{\gamma\beta}}\varepsilon_{A_4B_1B_2B_3}
+3\scal[[]{\ophi_{A_2B_1}}{F^{\gamma\beta}}_+\varepsilon_{A_3B_2B_3A_4}\right)\\
&\qquad\qquad\qquad\qquad\left.
-12\varepsilon_{A_1A_2B_1B_2}\scal[[]{F^{\gamma\beta}}{\ophi_{A_3B_3}\ophi_{A_4B_4}}_+\right)\\
&\qquad\qquad-\frac{2g^3i_{\pm}\lambda_{i\gamma}\lambda_{i\beta}}{3c_0^8i_-i_+^5}
\varepsilon_{A_1B_1B_2B_3}\scal[[]{F^{\gamma\beta}}{\ophi_{A_2B_4}}_+\ophi_{A_3A_4}
\end{align*}
and
\begin{align*}
&V_{A_1A_2A_3A_4B_1B_2B_3B_4}|_{\phi\psi\psi}\\
&\qquad=\frac{\sqrt{2}\,g^3\lambda_{i\gamma}\lambda_{(i+1)\beta}}{18c_0^8i_+^5}
\left(8\varepsilon_{A_2A_3B_1D}\varepsilon_{A_4B_2B_3B_4}
\scal[[]{\scal[[]{\ophi_{A_1C}}{\psi^{\gamma C}}}{\psi^{\beta D}}_-\right.\\
&\qquad\qquad\qquad\qquad-12\varepsilon_{A_2A_3B_4D}
\varepsilon_{A_4B_2B_3C}\psi^{\beta D}\scal[[]{\ophi_{A_1B_1}}{\psi^{\gamma C}}_+\\
&\qquad\qquad\qquad\qquad-3\varepsilon_{A_2B_1B_2C}\varepsilon_{A_3A_4B_3B_4}
\left(\scal[[]{\ophi_{A_1D}}{\psi^{\beta D}}\psi^{\gamma C}
-2\psi^{\gamma C}\scal[[]{\ophi_{A_1D}}{\psi^{\beta D}}\right)\\
&\qquad\qquad\qquad\qquad+4\varepsilon_{A_4B_2B_3B_4}\left(\varepsilon_{A_1A_2CD}\scal[[]{\scal[[]{\psi^{\beta D}}{\psi^{\gamma C}}_+}{\ophi_{A_3B_1}}_+\right.\\
&\qquad\qquad\qquad\qquad\qquad\qquad\qquad\left.+\varepsilon_{A_1A_3B_1D}\scal[[]{\psi^{\beta D}}{\scal[[]{\ophi_{A_2C}}{\psi^{\gamma C}}}_-\right)\\
&\qquad\qquad\qquad\qquad+3\varepsilon_{A_4B_3B_4C}\left(\varepsilon_{A_1A_2A_3D}\scal[[]{\scal[[]{\psi^{\beta D}}{\ophi_{B_1B_2}}}{\psi^{\gamma C}}_-\right.\\
&\qquad\qquad\qquad\qquad\qquad\qquad\qquad\left.+\varepsilon_{A_1B_1B_2D}\scal[[]{\scal[[]{\ophi_{A_2A_3}}{\psi^{\beta D}}}{\psi^{\gamma C}}_-\right)\\
&\qquad\qquad\qquad\qquad+4\ophi_{A_1B_4}\left(-2\varepsilon_{A_2A_3CD}\varepsilon_{A_4B_1B_2B_3}\scal[[]{\psi^{\beta D}}{\psi^{\gamma C}}_+\right.\\
&\qquad\qquad\qquad\qquad\qquad\qquad\qquad\left.+6\varepsilon_{A_4B_2B_3C}\varepsilon_{A_2A_3B_1D}\scal[[]{\psi^{\beta D}}{\psi^{\gamma C}}_-\right.\\
&\qquad\qquad\qquad\qquad\qquad\qquad\qquad\left.-3\varepsilon_{A_2B_2B_3C}\varepsilon_{A_3A_4B_1D}\psi^{\gamma C}\psi^{\beta D}\right)\\
&\qquad\qquad\qquad\qquad+3\varepsilon_{A_1B_3B_4C}\psi^{\gamma C}
\left(2\varepsilon_{A_2A_3A_4D}\scal[[]{\psi^{\beta D}}{\ophi_{B_1B_2}}
+2\varepsilon_{A_2B_1B_2D}\scal[[]{\ophi_{A_3A_4}}{\psi^{\beta D}}\right.\\
&\qquad\qquad\qquad\qquad\qquad\qquad\qquad
-\varepsilon_{A_2A_3B_1B_2}\scal[[]{\ophi_{A_4D}}{\psi^{\beta D}}
+8\varepsilon_{A_2A_3B_1D}\scal[[]{\ophi_{A_4B_2}}{\psi^{\beta D}}_+\\
&\qquad\qquad\qquad\qquad\qquad\qquad\qquad\left.\left.+4\varepsilon_{A_2A_3B_1D}\ophi_{A_4B_2}\psi^{\beta D}\right)\right)\\
&\qquad\qquad-\frac{2\sqrt{2}g^3i_{\pm}\lambda_{i\beta}\lambda_{i\gamma}}{c_0^8i_-i_+^5}\varepsilon_{A_1B_3B_4C}\varepsilon_{A_2B_1B_2D}\psi^{\gamma C}\psi^{\beta D}\ophi_{A_3A_4}
\end{align*}
and
\begin{align*}
V_{A_1A_2A_3A_4B_1B_2B_3B_4}|_{FF}
&=-\frac{g^2\lambda_{i\gamma}\lambda_{i\beta}\lambda_{(i+1)\epsilon}\lambda_{(i+1)\alpha}}{36c_0^8i_+^6}\varepsilon_{A_1A_2A_3B_1}\varepsilon_{A_4B_2B_3B_4}
\left(3F^{\gamma\beta}F^{\epsilon\alpha}+F^{\epsilon\alpha}F^{\gamma\beta}\right)\\
&\qquad+\frac{g^2\lambda_{i\epsilon}\lambda_{i\beta}\lambda_{(i+1)\gamma}\lambda_{(i+1)\alpha}}{8c_0^8i_+^6}\varepsilon_{A_1A_2B_1B_2}\varepsilon_{A_3A_4B_3B_4}F^{\gamma\beta}F^{\epsilon\alpha}
\end{align*}
\subsubsection*{The General Structure}
From the above formulas for $\mV_{i\,i+1}$, we see that higher order terms factor into terms
with the structure of lower order terms:
\begin{align*}
\mV_{i,i+1}\sim\sum\prod
\left(1+\ophi\cdot\eta^2+\frac{1}{\sqrt{g}}\psi\cdot\eta^3+\frac{1}{g}F\cdot\eta^4\right)
\end{align*}
It is understood that this is not an equation but only a similarity which helps memorise the types of terms occurring.
\section*{Acknowledgements}
I would like to thank Jan Plefka, Johannes Henn and Konstantin Wiegandt for
interesting discussions.
|
{
"timestamp": "2012-11-06T02:04:03",
"yymm": "1206",
"arxiv_id": "1206.6127",
"language": "en",
"url": "https://arxiv.org/abs/1206.6127"
}
|
\section{Introduction}
The radial velocity (RV) technique is the most successful exoplanet detection method, with more than 600 planets\footnote{The Extrasolar Planets Encyclopedia; http://www.exoplanet.eu} detected to date. This technique monitors the variation of the RV of the star due to the gravitational pull of an unseen companion. Since the RV of the planet is not measured directly, neither the orbital inclination $i$ of the planet nor its exact mass can be determined. Instead, only an estimation of the minimum mass of the planet can be derived by applying Kepler's Laws, which give $m_{\rm p, min}=m_{\rm p}\sin i$, where $m_{\rm p}$ is the unknown planetary mass. For transiting planets---i.e. for planets which periodically occult their host stars---the orbital inclination $i$ and therefore the exact planetary mass can be measured. However, the vast majority of extrasolar planets found to date are non-transiting and therefore without an exactly determined mass. Consequently, many of these non-transiting planets, especially the ones with the largest minimum masses, can only be labelled as {\it planetary candidates}, since for very low orbital inclinations they might turn out to be brown dwarfs.
One strategy to determine the exact mass of non-transiting planets is to directly measure the planetary RV signal via high-resolution (i.e. $R=\lambda / \Delta \lambda > 40,000$; $\lambda$ denotes the wavelength) spectroscopy with very large telescopes. Key to this method is to observe a large number of spectral features coming from the planet and to observe them at different orbital phases so the traveling faint planetary signal can be disentangled from the dominating stellar one. In the past, several high-resolution spectroscopy campaigns at optical wavelengths were carried out with the goal of detecting starlight reflected from hot Jupiters (i.e. massive planets that are a few stellar radii away from their host stars) and of measuring their exact masses. Although all of them resulted in non-detections of reflected light, these campaigns confirmed the low reflectivity of hot Jupiters at visual wavelengths (e.g. \citealp{1999ApJ...522L.145C,2002MNRAS.330..187C,2003MNRAS.344.1271L,2008A&A...485..859R}). Towards near-infrared (NIR) wavelengths, the planet-to-star flux ratios drastically increase due to the strong thermal emission of hot Jupiters. \cite{2001ApJ...546.1068W,2007MNRAS.379.1097B,bar08,bar10} and \cite{cub11} observed hot Jupiters by means of high-resolution spectroscopy at near-infrared wavelengths, but were not able to detect any molecules in their atmospheres.
The molecule carbon monoxide (CO) is one key to detect the radial velocity of an exoplanet, since it exhibits a dense forest of deep absorption lines in a
spectral band around 2.3~$\mu$m. Models predict CO to be one of the most
abundant molecules in hot gas giant exoplanets \citep{2006ApJ...649.1048C,2007ApJS..168..140S}. To test those models, some papers have already been published reporting the
detection or suggesting the presence of CO in the atmosphere of
some transiting hot Jupiters \citep{2008Natur.456..767G,2009ApJ...690L.114S,2009ApJ...699..478D,2010ApJ...712L.139T}. All these measurements were obtained with very low-resolution (R $<$ 40) HST NICMOS
NIR spectra or broad Spitzer photometry between 3.6 and 24
$\mu$m, both via transmission (primary transit) or emission (secondary
eclipse) observations. \cite{Sne10}
reported the detection of CO
via the analysis of the transmission spectrum of the transiting planet HD~209458b
using high-resolution spectroscopy between 2.30 and 2.33~$\mu$m. This allowed these authors to directly measure the RV of a transiting hot Jupiter for the very first time. We note that all those CO detections were done on transiting planets, but neither atmospheric chemicals nor RV shifts have yet been detected for non-transiting exoplanets.
Here we present the results of our attempt to detect the motion of the non-transiting planet $\tau$ Boo b via observations of its atmospheric CO spectrum in the NIR, by using a technique similar to \cite{Sne10}. The main goal of our study has been the measurement of the orbital inclination and the absolute mass of the planet, but as a by-product we can also confirm the presence of CO.
\section{The planetary system of $\tau$ Boo} \label{S2}
Tau Boo b is a massive hot Jupiter, orbiting
its host star every 3.31 days, and is estimated to have an atmospheric
temperature above 1800~K following the formalism in \cite{2007ApJ...667L.191L} assuming zero albedo and no energy redistribution from day-to-night side. Table~\ref{tab:tauboo} summarizes the
parameters of the planet and its very bright host star. We updated the orbital ephemerides by an RV-analysis of high-resolution UVES spectra of $\tau$ Boo published in \cite{rod10}.
\begin{table}
\caption{Parameters of the star $\tau$~Boo and its planetary
companion. Abbreviations for the references are: B06 = \cite{2006ApJ...646..505B} and references therein,
B97~=~\cite{1997ApJ...474L.119B},
G98~=~\cite{1998A&A...334..221G},
H00~=~\cite{2000ApJ...531..415H},
VF05~=~\cite{2005ApJS..159..141V},
VV09~=~\cite{2009ApJ...694.1085V}.}
\label{tab:tauboo}
\begin{center}
\begin{tabular}{l r l l }
\hline\hline
Parameter & Value & Error &Ref. \\
\hline
Star: \\
Spectral type & F7 IV-V & & B97 \\
$K~(mag)$ & 3.36 & 0.05 & VV09\\
$m_{\star}~ (\rm{M_{\odot}})$ & 1.33 & 0.11 & VF05 \\
$R_{\star}~(\rm{R_{\odot}})$ & 1.31 & 0.06 & G98 \\
$T_{\rm eff}$~(K) & 6360 & 80 & G98 \\
$P_{\rm{rot}}~({\rm d})$ & 3.31 & & B97 \\
$v~ \sin i~ (\rm{km~s^{-1}})$ & 14.9 & 0.5 & H00 \\
Age (Gyr) & $1.3$ & 0.4 & VF05 \\
\hline
Planet:\\
$m_{\rm{p}} \sin i~(\rm{M_{\rm{Jup}}})$ & 4.1 & 0.34 & B06 \\
$a~ (\rm{AU})$ & 0.0481 & 0.0028 & B06 \\
$e$ & 0.023 & 0.015 & B06 \\
$K_{\star}~(\rm{km~s^{-1}})$ & 0.4611 & 0.0076 & B06 \\
Orbital period $P_{\rm orb}$ (d) & 3.312458 & 0.00002 & new \\
${T_{\phi=0}}$ (JD) & 2~454~267.497 & 0.0122 & new \\
\hline
\end{tabular}
\end{center}
\end{table}
The maximum possible RV semi-amplitude of the companion $\tau$~Boo~b is calculated to be $K_{\rm{p,max}}=157.3\pm~4.4~\rm{km~s^{-1}}$ via
\begin{equation} \label{equ:doppler12}
K_{\rm{p,max}} = \Big(\frac{2\pi G~m_\star}{P_{\rm orb}}\Big)^{1/3} ~,
\end{equation}
where $G$ is the gravitational constant, $m_\star$ the stellar mass, and $P_{\rm orb}$ the orbital period. Due to the absence of transits
in high-precision photometry \citep{2000ApJ...531..415H}, orbital inclinations larger than $i=83^{\circ}$ can be excluded. \citet{1997ApJ...474L.119B} found that the star $\tau$~Boo rotates rapidly with a period
commensurate with the orbital period of the
planet, suggesting tidal locking.
{\citet{cat07} and \citet{don08} carried out spectropolarimetric observations of $\tau$~Boo and measured differential rotation in the star, confirming that its rotation is synchronized with the orbital motion of the planet and suggesting an orbital inclination around 40$^\circ$. \citet{2003MNRAS.344.1271L} and \citet{rod10} attempted to measure starlight reflected from $\tau$~Boo~b, and found candidate features of marginal significance, indicating orbital inclinations of $i=37^\circ$ and $41^\circ$, respectively.}
Under the assumption of a tidal lock and that the stellar
equator and the orbital plane are co-aligned, Rodler et al.~(2010) predicted an orbital inclination of $i\approx 46^{\circ}$ and a planetary mass of ${m_{\rm p}\approx 5.6~{\rm M}_{\rm Jup}}$.
\section{Observations and data reduction}
We observed $\tau$ Boo during 5 hours on June 10, 2011, by using CRIRES \citep{2004SPIE.5492.1218K}, mounted on the VLT/UT1 on Cerro Paranal, Chile. We collected a total of 116 high-resolution spectra of our target plus 2 spectra of the A-type star HD~116160, which does not show any intrinsic absorption lines in the observed wavelength regime and therefore could be used for the calibration of our telluric model. The date of the observations was selected in such a way that the observations were carried out at orbital phases between $\phi=0.55$ and 0.61 ($\phi=0$ corresponds to the mid-transit position if $i\approx90^\circ$), when the
{hot day side} of the planet facing the star was largely visible and consequently appeared bright in the NIR. In addition, based on the ephemerides in Table~1 and the value of $K_\star$, we expected a variation of the RV of the companion up to 40~km~s$^{-1}$ during the course of the observations.
The observations were carried out with the CRIRES standard setting of order 24 and a reference wavelength of $\lambda=2.3252~\mu$m. We used the 0.2''~slit to achieve a spectral resolution of $R=100,000$. We made use of the AO-system of CRIRES to minimize slit losses. Observing conditions were good, with a seeing between 0.6'' and 1.8''. The observations were taken at two different nodding positions along the slit with the intention to remove the faint OH-lines in the sky background. We chose the integration times in such a way that the peak count rates per exposure were low ($\sim4000$ counts) to avoid non-linear sensitivity in the four detectors.
The data were reduced with IRAF\footnote{IRAF Project Home Page:
http://iraf.noao.edu/}. { All four detectors showed a pattern aligned with the reading direction. This pattern consisted
of alternating rows or columns of larger and smaller intensities than the mean value and is called odd-even effect.
We applied a correction for the odd-even effect\footnote{An algorithm to correct this effect is provided at http://www.eso.org/sci/facilities/paranal/instruments/crires/tools/}, which was most present in the data recorded with detectors 1 and 4, for which the reading direction was perpendicular to the spectral dispersion.}
Since after this step strong noise features coming from those two detectors were still present in the data, we discarded those two detectors from any further analysis. The frames were taken in an A-A-B-B-B-B-A-A- sequence, where A and B denote the different nodding positions along the slit. For all frames as well as for each of the two remaining detectors, we combined the two consecutive frames, which had been taken at the same nodding position. This reduced the number of spectra to 58. Nodded images were
then subtracted to remove sky background and dark current. White-light spectra obtained with the same instrumental configuration in the afternoon before and after the observations were used to flat-field the data. Using the {\tt apall} task, we first identified and optimally centered the orders in the two individual nodding frames and traced these orders by adopting a second-order Legendre polynomial along the dispersion axis.
We then extracted the one-dimensional spectra in detectors 2 and 3 and calculated a second-order polynomial wavelength solution by adopting as a reference system the telluric lines present in the spectra. The vacuum wavelengths of the telluric lines were identified using the
{line-by-line radiative transfer model (LBLRTM)}
routine, which is based on the FASCODE algorithm \citep{1992JGR....9715761C}. The detectors 2 and 3 covered the wavelength regions 2.303 to 2.317~$\mu$m and 2.319 to 2.332~$\mu$m, respectively, with a pixel size corresponding to $\approx 1.6$~km~s$^{-1}$. We furthermore identified and removed obviously bad pixels in each spectrum coming from defects of the detector.
\section{Data analysis}
\subsection{Overview}
The observed target spectra were heavily contaminated by telluric lines, and to much less extent, by the rotationally broadened stellar absorption lines of the host star. A crucial step of the data analysis was the removal of the telluric and stellar spectra, and the search for the planetary spectrum in the residuals. In the following, we provide a description of the different data analysis steps.
\subsection{Step 1: Determination of the instrumental profile}
To calculate the instrumental profile (IP) of the spectrograph for each target observation, we made use of the telluric contamination, which dominated our data, and of the telluric model spectra (see Step~2). In contrast to the large number of telluric lines, the stellar spectrum of $\tau$ Boo exhibits very few absorption features in the observed wavelength regime, which appeared rotationally broadened due to the high $v \sin i$ (Table~1). Those regions of stellar absorption features were excluded from the determination of the instrumental profile. We normalized the spectra in such a way that the flux in the telluric/pseudo-stellar continuum was at one, and stored the photon noise error per spectral pixel. We then determined the IP as the sum of seven Gaussian profiles in a similar way as described in \cite{1995PASP..107..966V}: Around a central Gaussian we grouped three satellite Gaussians on each side of it, which allowed us to account for asymmetries in the IP. Free parameters were the width of the central Gaussian, plus the amplitudes of the six satellite Gaussians, while the positions and the widths of those satellites were fixed and set {\it a priori} in a way that their half-widths overlapped. By employing a Brent algorithm, we finally determined the free parameters and convolved the telluric model spectrum with the IP. We found that for all spectra, the shape of the IP remained constant. However, the width of the IP was slightly different at each nodding position and was also changing during the course of the night.
\subsection{Step 2: Telluric model}
For the calculation of the atmospheric transmission spectrum, we used the LBLRTM code, which is available as a Fortran source code\footnote{Source code and manuals are available under {\tt http://rtweb.aer.com/lblrtm\_description.html}}. As molecular database we adopted HITRAN \citep{2005JQSRT..96..139R}, which contains the 42 most prominent molecules and isotopes present in the atmosphere of the Earth. Following the approach presented by \cite{2010A&A...524A..11S}, we created a high-resolution theoretical vacuum-wavelength telluric spectrum for each observed spectrum by accounting for the air mass of the star as well as the weather conditions (water vapour density column, temperature and pressure profiles) during the observations. We retrieved the weather information from the Global Data Assimilation System (GDAS). GDAS models are available in 3 hour intervals for any location around the globe\footnote{GDAS webpage: {\tt http://ready.arl.noaa.gov/READYamet.php}}.
We first calibrated the line depths of the telluric lines by using the A-star observations, which had been taken at the beginning of the night. For five water and methane lines, the line depths were systematically underestimated in the HITRAN model; for these lines we consequently updated the abundances in our telluric model. In addition to that, the amount of water vapor in the atmosphere constantly changed during the observations, which resulted in variations of the line depths of the water lines in the observed spectra. Consequently, for each observed target spectrum, we created a set of two telluric model spectra, one for water and one for the other molecules, most notably CH$_4$. We then globally scaled the line depths for those two models for each target observation. Finally, we multiplied the two different high-resolution telluric spectra and convolved the result with the IP to create the telluric model.
\subsection{Step 3: Stellar model}
A few stellar absorption lines remained in the data after the removal of the telluric lines. We attempted to subtract the stellar spectra from the data using models, but found that existing models (MARCS, \citealp{2008A&A...486..951G}; PHOENIX, \citealp{1997ApJ...483..390H}) do not reproduce the observed stellar spectrum of $\tau$ Boo. The best alternative was to create a high signal-to-noise ratio template by co-adding all the observed telluric-free spectra.
Due to the expected large orbital motion of the planet, contributions to this template spectrum coming from the planet, which is estimated to be a thousand times fainter than the star, were smeared out to a large extent.
Before combining the individual stellar spectra to one template spectrum, we applied a Savitzky-Golay smoothing algorithm \citep{pre92} to each of them, which ensured that the broad stellar lines remained in the spectra, while the sharp and unseen planetary lines were smoothed out. In the final step, we subtracted the stellar template spectrum from all telluric-free target spectra.
\subsection{Step 4: Searching for the CO features}
The mean RV of the host star $\tau$ Boo with respect to the barycenter of the Solar system is $-15.8\pm0.5$~km~s$^{-1}$ (blue-shifted;
\citealp{2000A&AS..142..217B}). The barycentric RV of the Earth with respect to the sky position of the target at the time of observations was between -22.8~km~s$^{-1}$ and -23.4~km~s$^{-1}$. This means that during the times of our measurements, the stellar spectrum of $\tau$ Boo was observed with a red-shift of about 8~km~s$^{-1}$. That red-shift was subtracted from the resultant RVs of the planetary signal, as described below.
After the removal of the stellar and telluric absorption lines, we corrected the residual spectra for trends originating from intra-pixel variations. We then cross-correlated the telluric-free and stellar-free target residual spectra with a CO-model spectrum (Fig.~1), which was calculated with the PHOENIX code \citep{1997ApJ...483..390H,2008ApJ...675L.105H,2011A&A...529A..44W} for a brown dwarf having a temperature of $T_{\rm eff}=1800$~K and Solar metallicity. To this end, we shifted the CO-model spectrum for each residual spectrum according to the instantaneous radial velocity $V_{\rm{p}}(\phi,i)$ with respect to the star, which depends on both the orbital phase $\phi$, which was a priori known, and the unknown orbital inclination $i$ as:
\begin{equation} \label{equ:doppler1}
V_{\rm{p}} = K_{\rm{p,max}} \sin 2\pi\phi \sin i = K_{\rm p} \sin 2\pi\phi ~.
\end{equation}
For all residual spectra, we then calculated the correlation value for different RV semi-amplitudes $K_{\rm p}$ of the planetary signal and finally determined the cross-correlation function (CCF) with respect to $K_{\rm p}$.
\subsection{Step 5: Confidence level}
The confidence level of the strongest peak of the CCF was determined by employing a
bootstrap randomisation method (e.g. \citealp{1997A&A...320..831K}): We assigned random values of the orbital phases to the observed spectra, thereby creating $N$ different data sets.
Any signal present in the original data was then scrambled in these
artificial data sets.
For all these randomized data sets, we again evaluated the model for the
free parameter $K_{\rm p}$ and located the best fit with its specific CCF-peak value.
The confidence level of the CCF peak was estimated to be $\approx 1-m/N$, where $m$ is the number of the best fit models having a CCF-peak value larger or equal than the maximum of the CCF found in the original, unscrambled
data sets. Notice that we consider a detection to have a confidence of $\ge 0.9987$, which translates to $\ge 3\sigma$.
\section{Results and discussion}
As illustrated in Figures 2 and 3, the results of our cross-correlation analysis using a CO-model spectrum
reveal Doppler velocity shifts consistent with a maximum RV semi-amplitude for the planet of $K_{\rm p} = 115 \pm 11~$km~s$^{-1}$. { The $1\sigma$-errorbar contains both the actual error of the velocity measurement, which was determined via bootstrap resampling \citep{bar92}, as well as the shift in velocity due to the uncertainties in the orbital solution.} In addition to that, our bootstrap randomisation analysis revealed that this signal is significant at the $3.4\sigma$ confidence level, with 2 false positives in 3000 trials.
\begin{figure}[t!]
\centering
\includegraphics[angle=-90,width=8.8cm]{ms-fg1.ps}
\caption{Bottom: Residual spectrum after subtraction of the stellar and telluric absorption lines from the data. The upper spectrum depicts the model of the dense forest of carbon-monoxide (CO) absorption lines. For clarity, the line depths in the CO model are shown for a planet-to-star flux ratio of $2\times10^{-3}$, and the spectrum is shifted up by 3 units.}
\label{plot01}
\end{figure}
\begin{figure}[t!]
\centering
\includegraphics[angle=-90,width=9cm]{ms-fg2.ps}
\caption{CO absorption in the planet atmosphere of $\tau$~Boo~b. The individual cross-correlation functions (CCFs) of the 58 residual spectra with the CO model spectrum in the rest-frame of the star are shown. During the course of the observations, the orbital motion of the planet produces a RV-shift starting at about -35~km~s$^{-1}$ and ending at 65~km~s$^{-1}$, respectively for orbital phases of 0.55 and 0.61. The linear grey-scales indicate the strength of the cross-correlation signal (bright means absorption, dark means emission).}
\label{plot02}
\end{figure}
\begin{figure}[t!]
\centering
\includegraphics[angle=-90,width=8.7cm]{ms-fg3.ps}
\caption{The CO signal co-added for all spectra. The cross-correlation functions (CCF) from all 58 residual spectra were combined assuming different RV semi-amplitudes of $\tau$~Boo~b. The peak of the CCF occurs at a RV semi-amplitude of $115\pm 11$~km~s$^{-1}$, which corresponds to an orbital inclination of $i=47^{+7}_{-6}$~degrees. The peak of the CCF is significant at the $3.4\sigma$ confidence level.}
\label{plot03}
\end{figure}
The measured RV semi-amplitude corresponds to an orbital inclination for the system of $i = 47^{+7}_{-6}$~degrees. Adopting the measured orbital inclination, the value of the stellar mass in Table~\ref{tab:tauboo} and Kepler's laws of planetary motion, we are finally able to derive an absolute mass for the planet of $m_{\rm p} = 5.6 \pm 0.7~{\rm M}_{\rm Jup}$. This value clearly confirms the planet hypothesis for the hot Jupiter $\tau$~Boo~b.
Given the precision of our data, possible residuals of the planetary spectrum in the stellar template, the lack of detailed atmospheric temperature-pressure profile models for this planet, and uncertainties in the possible level of masking between CO and CH$_4$ (both molecules are expected to co-exist in $\tau$ Boo, e.g. \citealp{1999ApJ...512..843B}), we cannot provide a reliable estimation of the CO abundance in the atmosphere of $\tau$ Boo b. However, we can place a lower limit to the planet-to-star flux ratio in the observed wavelength range based on the atmospheric model we adopted. To this end, we created data sets which were based on our residual spectra. For each of these data sets, we injected an artificial planetary CO signal having a RV semi-amplitude of $K_{\rm p} = 115$~km~s$^{-1}$ with selected planet-to-star flux ratios from $10^{-2}$ down to $10^{-4}$. To avoid an overlapping of the CCF peaks with the real signal, we red-shifted the spectrum of the injected planet by 50~km~s$^{-1}$. For each data set, we then determined the peak of the CCF apart from the one of the real signal and determined its confidence level. We find that with our data set and our CO-model, we are sensitive to detecting a planet with a planet-to-star flux ratio larger than $\approx7\times10^{-4}$ at the 3$\sigma$ confidence level.
We emphasize that this measurement of the exact mass of $\tau$~Boo~b represents the first successful determination of the mass of a non-transiting planet by means of high-resolution spectroscopy.
This technique has therefore the potential of providing direct masses and estimation of the atmospheric composition of non-transiting exoplanets in the near-future, in particular as new facilities like E-ELT become available.
\acknowledgments
Based on observations made with ESO Telescopes at the Paranal Observatory under programme ID 087.C-0407. This work has been partially supported by NSF's grant AST-0908278.
We are very grateful to Andreas Seifahrt for his help with LBLRTM and HITRAN, to Martin K\"urster for input for data analysis strategies and statistics, and to John Barnes for discussions about the search for thermal emission from hot Jupiters. We would further like to thank Ignas Snellen and Matteo Brogi for their help, and to the anonymous referee for very constructive suggestions which significantly improved the manuscript.
|
{
"timestamp": "2012-06-28T02:02:05",
"yymm": "1206",
"arxiv_id": "1206.6197",
"language": "en",
"url": "https://arxiv.org/abs/1206.6197"
}
|
\section{Introduction}
\smallskip
Let $E$ be an elliptic curve defined over ${\mathbb Q}$.
For a prime $p$ of good reduction for $E$
the reduction of $E$ modulo $p$ is an elliptic curve $E_p$ defined over the finite field ${\mathbb F}_p$
with $p$ elements.
Denote by $E_p({\mathbb F}_p)$ the group of ${\mathbb F}_p$-rational points of $E_p$.
Its structure as a group, for example, the existence of large cyclic subgroups, especially of prime order, is of interest because of applications to elliptic curve cryptography \cite{Koblitz1987, Miller1986}.
It is well known that the finite abelian group $E_p({\mathbb F}_p)$ has structure
\begin{equation}\label{Structure}
E_p({\mathbb F}_p)\simeq ({\mathbb Z}/d_p{\mathbb Z}) \oplus ({\mathbb Z}/e_p{\mathbb Z})
\end{equation}
for uniquely determined positive integers $d_p$ and $e_p$ with $d_p\mid e_p$.
Here $e_p$ is the size of the maximal cyclic subgroup of $E_p({\mathbb F}_p)$, called the exponent of $E_p({\mathbb F}_p)$.
The study about $e_p$ as a function of $p$ has received considerable attention
\cite{Schoof1991, Duke2003, Co2003, CoMu2004}, where the following problems were considered:
\begin{itemize}
\item{lower bounds for the maximal values of $e_p$,}
\item{the frequency of $e_p$ taking its maximal value,
i.e., the density of the primes $p$ for which $E_p({\mathbb F}_p)$ is a cyclic group,}
\item{the smallest prime $p$ for which the group $E_p({\mathbb F}_p)$ is cyclic (elliptic curve analogue of Linnik's problem).}
\end{itemize}
Very recently motivated by a question of Silverman,
Freiberg and Kurlberg \cite{FK2012} investigated the average order of $e_p$.
Before stating their results, let us fix some notation.
Given a positive integer $k$,
let $E[k]$ denote the group of $k$-torsion points of $E$
(called {\it the $k$-division group of $E$}) and
let $L_k := {\mathbb Q}(E[k])$ be the field obtained by adjoining to ${\mathbb Q}$ the coordinates of the points of $E[k]$
(called {\it the $k$-division field of $E$}).
Write
\begin{equation}\label{defnLk}
n_{L_k} := [L_k : {\mathbb Q}].
\end{equation}
Denote by $\mu(n)$ the M\"obius function, by $\pi(x)$ the prime-counting function and
by $\zeta_{L_k}(s)$ the Dedekind zeta function associated with $L_k$, respectively.
Assuming the Generalized Riemann Hypothesis (GRH) for $\zeta_{L_k}(s)$ for all positive integers $k$,
Freiberg and Kurlberg \cite[Theorem 1.1]{FK2012} showed that
\begin{equation}\label{FK}
\frac{1}{\pi(x)} \sum_{p\leqslant x} e_p
= \frac{1}{2} C_E x + O_E\big(x^{9/10} (\log x)^{11/5}\big)
\end{equation}
for all $x\geqslant 2$,
where
\begin{equation}\label{defCE}
C_E
:= \sum_{k=1}^{\infty} \frac{1}{n_{L_k}} \sum_{dm=k} \frac{\mu(d)}{m}
= \prod_p \bigg(1-\sum_{\nu=1}^{\infty} \frac{p-1}{p^\nu n_{L_{p^\nu}}}\bigg).
\end{equation}
The implied constant depends on $E$ at most.
When $E$ has complex multiplication (CM), they \cite[Theorem 1.2]{FK2012} also proved that \eqref{FK} holds unconditionally with a weaker error term
\begin{equation}\label{FKCM}
O_E\bigg(x\frac{\log_3x}{\log_2x}\bigg),
\end{equation}
where $\log_\ell$ denotes the $\ell$-fold iterated logarithm.
\vskip 2mm
The aim of this short note is to propose more precise result than \eqref{FK} and \eqref{FKCM}.
\begin{theorem}\label{thm}
Let $E$ be an elliptic curve over ${\mathbb Q}$.
\par
{\rm (a)}
Assuming GRH for the Dedekind zeta function $\zeta_{L_k}$ for all positive integers $k$, we have
\begin{equation}\label{thm(a)}
\frac{1}{\pi(x)} \sum_{p\leqslant x} e_p
= \frac{1}{2} C_E x + O_E\big(x^{5/6} (\log x)^{4/3}\big).
\end{equation}
{\rm (b)}
If $E$ has CM, then we have unconditionally
\begin{equation}\label{thm(b)}
\frac{1}{\pi(x)} \sum_{p\leqslant x} e_p
= \frac{1}{2} C_E x + O_E\bigg(\frac{x}{(\log x)^{1/14}}\bigg).
\end{equation}
Here $C_E$ is given as in \eqref{defCE} and the implied constants depend on $E$ at most.
\end{theorem}
{\bf Remark}.
(a)
Our proof of Theorem \ref{thm} is a refinement of Freiberg and Kurlberg's method \cite{FK2012}
with some simplification.
(b)
For comparison of \eqref{FK} and \eqref{thm(a)}, we have $\tfrac{9}{10}=0.9$ and $\tfrac{5}{6}=0.833\cdots$.
(c)
The quality of \eqref{thm(b)} can be compared with the following result of Kurlberg and Pomerance \cite[Theorem 1.2]{KP2012} concerning the multiplicative order of a number modulo $p$:
Given a rational number $g\not=0, \pm 1$ and prime $p$ not dividing the numerator of $g$,
let $\ell_g(p)$ denote the multiplicative order of $g$ modulo $p$.
Assuming GRH for $\zeta_{{\mathbb Q}(g^{1/k}, {\rm e}^{2\pi{\rm i}/k})}(s)$ for all positive integers $k$, one has
$$
\frac{1}{\pi(x)} \sum_{p\leqslant x} \ell_g(p)
= \frac{1}{2} C_g x + O\bigg(\frac{x}{(\log x)^{1/2-1/\log_3x}}\bigg),
$$
where $C_g$ is a positive constant depending on $g$.
\vskip 5mm
\section{Preliminary}
Let $E$ be an elliptic curve over ${\mathbb Q}$ with conductor $N_E$ and let $k\geqslant 1$ be an integer.
For $x\geqslant 1$, define
$$
\pi_E(x; k)
:= \sum_{\substack{p\leqslant x\\ p\nmid N_E, \, k\mid d_p}} 1.
$$
The evaluation of this function will play a key role in the proof of Theorem \ref{thm}.
Using the Hasse inequality (see \eqref{Hasse} below),
it is not difficult to check that $p\nmid d_p$ for $p\nmid N_E$.
Thus the conditions $p\nmid N_E$ and $k\mid d_p$ are equivalent to $p\nmid kN_E$ and $k\mid d_p$,
that is $p\nmid kN_E$ and $E_p({\mathbb F}_p)$ contains a subgroup isomorphic to ${\mathbb Z}/k{\mathbb Z}\times {\mathbb Z}/k{\mathbb Z}$.
Hence by \cite[Lemma 1]{Mu1983}, we have
$$
\sum_{\substack{p\leqslant x\\ \text{$p$ splits completely in $L_k$}}} 1
= \pi_E(x; k) + O(\log(N_Ex)).
$$
In order to evaluate the sum on the left-hand side,
we need effective versions of the Chebotarev density theorem.
They were first derived by Lagarias and Odlyzko \cite{LaOd1979},
refined by Serre \cite{Serre1981}, and subsequently improved by M. Murty, V. Murty and Saradha \cite{MuMuSa1988}.
With the help of these results, one can deduce the following lemma
(cf. \cite[Lemma 3.3]{FK2012}).
\begin{lemma}\label{lem1}
Let $E$ be an elliptic curve over ${\mathbb Q}$ with conductor $N_E$.
\par
{\rm (a)}
Assuming GRH for the Dedekind zeta function $\zeta_{L_k}(s)$, we have
\begin{equation}\label{Eq1lem1}
\pi_E(x; k) = \frac{\hbox{{\rm Li}}(x)}{n_{L_k}} + O\big(x^{1/2} \log(N_Ex)\big)
\end{equation}
uniformly for $x\geqslant 2$ and $k\geqslant 1$,
where the implied constant is absolute.
\par
{\rm (b)}
There exist two absolute constants $B>0$ and $C>0$ such that
\begin{equation}\label{Eq2lem1}
\pi_E(x; k)= \frac{\hbox{{\rm Li}}(x)}{n_{L_k}} + O\big(x {\rm e}^{-B(\log x)^{5/14}}\big)
\end{equation}
uniformly for $x\geqslant 2$ and $C N_E^2 k^{14}\leqslant \log x$,
where the implied constant is absolute.
\end{lemma}
The next lemma (cf. \cite[Proposition 3.2]{FK2012} or \cite[Propositions 3.5 and 3.6]{CoMu2004})
gathers some properties of the division fields $L_k$ of $E$ and estimates for $n_{L_k}$,
which will be useful later.
Denote by $\varphi(k)$ the Euler function.
\begin{lemma}\label{lem2}
{\rm (a)}
The field $L_k$ contains ${\mathbb Q}({\rm e}^{2\pi {\rm i}/k})$.
Therefore $\varphi(k)\mid n_{L_k}$ and
a rational prime $p$ which splits completely in $L_k$ satisfies $p\equiv 1 ({\rm mod}\,k)$.
\par
{\rm (b)}
$n_{L_k}$ divides $|\hbox{{\rm GL}}_2({\mathbb Z}/k{\mathbb Z})|=k^3 \varphi(k) \prod_{p\mid k} (1-p^{-2})$.
\par
{\rm (c)}
If $E$ is a non-CM curve, then there exists a constant $B_E\geqslant 1$ (depending only on $E$)
such that $|\hbox{{\rm GL}}_2({\mathbb Z}/k{\mathbb Z})|\leqslant B_E n_{L_k}$ for each $k\geqslant 1$.
Moreover, we have $|\hbox{{\rm GL}}_2({\mathbb Z}/k{\mathbb Z})| = n_{L_k}$ whenever $(k, M_E)=1$
$($where $M_E$ is Serre's constant\,$)$.
\par
{\rm (d)}
If $E$ has CM, then $\varphi(k)^2\ll n_{L_k}\leqslant k^2$.
\end{lemma}
\vskip 5mm
\section{Proof of Theorem \ref{thm}}
Let $a_E(p) := p+1- |E_p({\mathbb F}_p)|$, then
$$
e_p = \begin{cases}
(p+1-a_E(p))/d_p & \text{if $\,p\nmid N_E$},
\\\noalign{\vskip 1mm}
0 & \text{otherwise}.
\end{cases}
$$
By using Hasse's inequality
\begin{equation}\label{Hasse}
|a_E(p)|<2\sqrt{p}
\end{equation}
for all primes $p\nmid N_E$, it is easy to see that
\begin{equation}\label{Eq1}
\sum_{p\leqslant x} e_p
= \sum_{p\leqslant x, \, p\nmid N_E} \frac{p}{d_p} + O\bigg(\frac{x^{3/2}}{\log x}\bigg).
\end{equation}
In order to evaluate the last sum,
we first notice that the Hasse inequality \eqref{Hasse} implies $d_p\leqslant 2\sqrt{p}$.
Thus we can use the formula
$$
\frac{1}{k}
= \sum_{dm\mid k} \frac{\mu(d)}{m}
$$
to write
\begin{equation}\label{Eq2}
\sum_{\substack{p\leqslant x\\ p\nmid N_E}} \frac{p}{d_p}
= \sum_{\substack{p\leqslant x\\ p\nmid N_E}} p \sum_{dm\mid d_p} \frac{\mu(d)}{m}
= \sum_{k\leqslant 2\sqrt{x}} \sum_{dm=k} \frac{\mu(d)}{m}
\sum_{\substack{p\leqslant x\\ p\nmid N_E, \, k\mid d_p}} p.
\end{equation}
Let $y\leqslant 2\sqrt{x}$ be a parameter to be chosen later and define
\begin{align*}
S_1
& := \sum_{k\leqslant y} \sum_{dm=k} \frac{\mu(d)}{m} \sum_{\substack{p\leqslant x\\ p\nmid N_E, \, k\mid d_p}} p,
\\
S_2
& := \sum_{y<k\leqslant 2\sqrt{x}} \sum_{dm=k} \frac{\mu(d)}{m} \sum_{\substack{p\leqslant x\\ p\nmid N_E, \, k\mid d_p}} p.\end{align*}
With the help of Lemma \ref{lem1}(a), a simple partial integration allows us to deduce
(under GRH)
\begin{equation}\label{sump}
\begin{aligned}
\sum_{\substack{p\leqslant x\\ p\nmid N_E, \, k\mid d_p}} p
& = \int_{2-}^x t \,{\rm d} \pi_E(t; k)
= x \pi_E(x; k)
- \int_{2}^x \pi_E(t; k) \,{\rm d} t
\\\noalign{\vskip -3mm}
& = \frac{x \hbox{{\rm Li}}(x)}{n_{L_k}}
- \frac{1}{n_{L_k}} \int_{2}^x \hbox{{\rm Li}}(t) \,{\rm d} t
+ O_E\big(x^{3/2}\log x\big)
\\\noalign{\vskip 1mm}
& = \frac{\hbox{{\rm Li}}(x^2)}{n_{L_k}}
+ O_E\big(x^{3/2}\log x\big).
\end{aligned}
\end{equation}
On the other hand, by Lemma \ref{lem2} we infer that
\begin{equation}\label{CE}
\sum_{k\leqslant y} \frac{1}{n_{L_k}} \sum_{dm=k} \frac{\mu(d)}{m}
= C_E + O(y^{-1}).
\end{equation}
Thus combining \eqref{sump} with \eqref{CE} and using the following trivial inequality
\begin{equation}\label{trivial}
\bigg|\sum_{dm=k} \frac{\mu(d)}{m}\bigg|
\leqslant \frac{\varphi(k)}{k}
\leqslant 1,
\end{equation}
we find
\begin{equation}\label{S1}
\begin{aligned}
S_1
& = \hbox{{\rm Li}}(x^2) \sum_{k\leqslant y} \frac{1}{n_{L_k}}\sum_{dm=k} \frac{\mu(d)}{m}
+ O_E\bigg(x^{3/2}\log x \sum_{k\leqslant y} \bigg|\sum_{dm=k} \frac{\mu(d)}{m}\bigg|\bigg)
\\
& = C_E \hbox{{\rm Li}}(x^2)
+ O_E\bigg(\frac{x^2}{y\log x} + x^{3/2}y\log x\bigg).
\end{aligned}
\end{equation}
Next we treat $S_2$.
By \cite[Lemma 3.1 and Proposition 3.2(a)]{FK2012},
we see that $k\mid d_p$ implies that $k^2\mid (p+1-a_E(p))$ and also $k\mid (p-1)$,
hence $k\mid (a_E(p)-2)$.
With the aid of this and the Brun-Titchmarsh inequality, we can deduce that
\begin{align*}
S_2
& \ll x \sum_{y<k\leqslant 2\sqrt{x}}
\bigg(
\sum_{\substack{|a|\leqslant 2\sqrt{x}, a\not=2\\ a\equiv 2 ({\rm mod} k)}}
\sum_{\substack{p\leqslant x, a_E(p)=a\\ k^2\mid p+1-a}} 1
+
\sum_{\substack{p\leqslant x, a_E(p)=2\\ k^2\mid p-1}} 1
\bigg)
\\
& \ll x \sum_{y<k\leqslant 2\sqrt{x}}
\bigg(
\frac{\sqrt{x}}{k}\cdot\frac{x}{k\varphi(k)\log(8x/k^2)}
+
\frac{x}{k^2}
\bigg).
\end{align*}
By virtue of the elementary estimate
$$
\sum_{n\leqslant t} \frac{1}{\varphi(k)}
= D\log t + O(1)
\qquad(t\geqslant 1)
$$
with some positive constant $D$,
a simple integration by parts leads to
\begin{equation}\label{S2}
S_2
\ll \frac{x^{5/2}}{y^2 \log(8x/y^2)} + \frac{x^{2}}{y}\cdot
\end{equation}
Inserting \eqref{S1} and \eqref{S2} into \eqref{Eq2}, we find
\begin{equation}\label{Eq3}
\begin{aligned}
\sum_{p\leqslant x, \, p\nmid N_E} \frac{p}{d_p}
= C_E \hbox{{\rm Li}}(x^2)
+ O_E\bigg(x^{3/2}y\log x + \frac{x^{5/2}}{y^2 \log(8x/y^2)} + \frac{x^{2}}{y}\bigg),
\end{aligned}
\end{equation}
where we have used the fact that the term $x^2y^{-1}(\log x)^{-1}$
can be absorbed by $x^{5/2}y^{-2}(\log(8x/y^2))^{-1}$
since $y\leqslant 2\sqrt{x}$.
Now the asymptotic formula \eqref{thm(a)} follows from \eqref{Eq1} and \eqref{Eq3}
with the choice of $y=x^{1/3}(\log x)^{-2/3}$.
\vskip 1mm
The proof of \eqref{thm(b)} is very similar to that of \eqref{thm(a)}.
In what follows we shall only point out some important differences.
Similar to \eqref{sump}, we can apply Lemma \ref{lem1}(b) to prove (unconditionally)
$$
\sum_{\substack{p\leqslant x\\ p\nmid N_E, \, k\mid d_p}} p
= \frac{\hbox{{\rm Li}}(x^2)}{n_{L_k}}
+ O_E\big(x^{2}\exp\{-B(\log x)^{5/14}\}\big)
$$
for $k\leqslant (C^{-1}N_E^{-2}\log x)^{1/14}$.
As before from this and \eqref{CE}-\eqref{trivial}, we can deduce that
\begin{equation}\label{S1(b)}
S_1
= C_E \hbox{{\rm Li}}(x^2)
+ O_E\big(x^2y^{-1}(\log x)^{-1} + x^{2}y{\rm e}^{-B(\log x)^{5/14}}\big)
\end{equation}
for $y\leqslant (C^{-1}N_E^{-2}\log x)^{1/14}$.
The treatment of $S_2$ is different.
First we divide the sum over $k$ in $S_2$ into two parts according to
$y<k\leqslant x^{1/4}(\log x)^{3/4}$ or $x^{1/4}(\log x)^{3/4}<k\leqslant 2\sqrt{x}$.
When $E$ has CM, we have (see \cite[page 692]{Duke2003})
$$
\sum_{\substack{p\leqslant x\\ p\nmid N_E, \, k\mid d_p}} 1
\ll \frac{x}{\varphi(k)^2 \log x}
$$
for $k\leqslant x^{1/4}(\log x)^{3/4}$.
Thus the contribution from $y<k\leqslant x^{1/4}(\log x)^{3/4}$ to $S_2$ is
\begin{align*}
& \ll \frac{x^2}{\log x} \sum_{y<k\leqslant x^{1/4}(\log x)^{3/4}} \frac{1}{\varphi(k)^2}
\ll \frac{x^2}{y\log x}\cdot
\end{align*}
Clearly the inequality \eqref{S2} (taking $y=x^{1/4}(\log x)^{3/4}$) implies that the contribution from
$x^{1/4}(\log x)^{3/4}<k\leqslant 2\sqrt{x}$ to $S_2$ is
\begin{align*}
\ll \sum_{x^{1/4}(\log x)^{3/4}<k\leqslant 2\sqrt{x}}
\sum_{\substack{p\leqslant x\\ p\nmid N_E, \, k\mid d_p}} p
& \ll \frac{x^2}{(\log x)^{5/2}}\cdot
\end{align*}
By combining these two estimates, we obtain
\begin{equation}\label{S2(b)}
S_2\ll \frac{x^2}{y\log x} + \frac{x^2}{(\log x)^{5/2}}\cdot
\end{equation}
Inserting \eqref{S1(b)} and \eqref{S2(b)} into \eqref{Eq2}, we find
\begin{equation}\label{Eq3(b)}
\begin{aligned}
\sum_{p\leqslant x, \, p\nmid N_E} \frac{p}{d_p}
= C_E \hbox{{\rm Li}}(x^2)
+ O_E\bigg(\frac{x^2}{y\log x} + \frac{x^2}{(\log x)^{5/2}} + x^{2}y{\rm e}^{-B(\log x)^{5/14}}\bigg)
\end{aligned}
\end{equation}
for $y\leqslant (C^{-1}N_E^{-2}\log x)^{1/14}$.
Now the asymptotic formula \eqref{thm(b)} follows from \eqref{Eq1} and \eqref{Eq3(b)}
with the choice of $y = (C^{-1}N_E^{-2}\log x)^{1/14}$.
\vskip 10mm
|
{
"timestamp": "2012-06-27T02:03:12",
"yymm": "1206",
"arxiv_id": "1206.5929",
"language": "en",
"url": "https://arxiv.org/abs/1206.5929"
}
|
\section{Introduction}
For a contractive iterated function system (IFS) $\{S_j\}_{j=1}^N$ of similitudes on ${\mathbb R}^d$, there is a tree of finite words which represents each point of the associated self-similar set $K$. The iteration defines a random walk on the tree, and the Martin boundary of the random walk is a Cantor set (\cite{[C]}, see also \cite{[K3]}).
On the other hand, Denker and Sato [DS1-3] introduced a random walk on the symbolic space of the Sierpinski gasket, and showed that the Martin boundary is homeomorphic to the gasket. Furthermore, they identified a subclass of harmonic functions from the random walk with Kigami's harmonic functions (\cite{[K1]}, \cite{[K2]}) on the gasket. The case of the pentagasket and other extensions were studied in \cite{[I]} and \cite{[DIK]}. Recently, Lau, Ju and the author \cite{[JLW]} extended this to the class of mono-cyclic post critically finite (p.c.f.) self-similar sets, more generally to self-similar sets with the open set condition (OSC) \cite{[Lau-Wang-2011]}. This provides a close link of the boundary theory with the recent development of analysis on fractals.
In another direction, Kaimanovich \cite{[Ka]} introduced a hyperbolic structure (``augmented tree") on the symbolic space of the Sierpinski gasket, and showed that the gasket can be identified by the hyperbolic boundary of the graph. The Martin boundary of the simple random walk on the graph can be obtained by a general theory on the random walk on hyperbolic graph (\cite{[A]}, \cite{[Woess]}).
Let $\{S_j\}_{j=1}^N$ be an IFS of similitudes on ${\mathbb R}^d$. Denote by $0< r_i <1, \ i=1,2, \cdots, N$ the contraction ratio of $S_i$. Let $\Sigma^* = \cup_{n=0}^\infty \{1,2,\cdots, N\}^n$ be the finite words space. (We use the notation $o$ to denote the empty word and $\{1,2, \cdots, N\}^0 := \{o\}$). For $\bfi = i_1 \cdots i_n, \bfj = j_1 \cdots j_m \in \Sigma^*$, denote $\bfi \bfj = i_1 \cdots i_n j_1 \cdots j_m$ the concatenation ($o \bfi = \bfi o = \bfi$), $S_\bfi = S_{i_1} \circ \cdots \circ S_{i_n}$ the composition ($S_o$ is the identity map by convention) and $r_\bfi = r_{i_1} \cdots r_{i_n}$. Let $r=\min\{r_i:\ i=1,2, \cdots, N\}$, and for each integer $n \ge 1$, let
\[
{\mathcal J}_n = \left\{j_1 j_2 \cdots j_n \in \Sigma^*:\ r_{j_1} \cdots r_{j_n} \le r^n < r_{j_1} \cdots r_{j_{n-1}}\right\}. \quad {\mathcal J} = \cup_{n=0}^\infty {\mathcal J}_n,
\]
where ${\mathcal J}_0=\{o\}$. If $\bfi = i_1 \cdots i_k \in {\mathcal J}_n$, we denote this by $|\bfi| = n$, and say that $\bfi$ is in {\it level} $n$ (Note that $|\bfi|$ is not the length of $\bfi$ in general). We say $\bfi, \bfj \in {\mathcal J}$ are {\it equivalent} and denote by $\bfi \sim \bfj$ if and only if $S_\bfi = S_\bfj$. It is clear that $\bfi \sim \bfj$ implies that $|\bfi|=|\bfj|$. Moreover, $\sim$ defines an equivalence relation on ${\mathcal J}$. We denote by $X$ the quotient space ${\mathcal J}/\sim$, and $[\bfi]$ the equivalence class of $\bfi$. For $x = [\bfi] \in X$, we denote $S_x = S_\bfi$ and $|x| = |\bfi|$. By abusing notation, we write $\bfi \in X$ means that $[\bfi] \in X$.\\
There is a natural graph on $X$: For $x=\{\bfi_1, \cdots, \bfi_n\}, \ y =\{ \bfj_1, \cdots, \bfj_m\} \in X$ (recall an element in $X$ is an equivalence class of some multi-index in $\Sigma^*$), we say that there is an edge between $x$ and $y$ if $\bfj_k = \bfi_\ell \bfk$ for some $1 \le k\le m, 1\le \ell \le n$, $\bfk \in \Sigma^*$ and $|y| = |x| + 1$. We denote by ${\mathcal E}_v$ the above edge set. For $y \in X$, we use the notation $y^{-1}$ to denote any one of $x \in X$ such that $(x,y) \in {\mathcal E}_v$ and $|y| = |x| + 1$. More generally, define inductively $y^{-n}= (y^{-(n-1)})^{-1}$. It follows that $x, x^{-1}, \cdots, x^{-n}$ is a path from $x$ to $x^{-n}$. If $(x,y) \in {\mathcal E}_v$ with $|y| = |x|+1$, we say that $x$ is an {\it ancestor} of $y$ and $y$ a {\it descendent} of $x$. It is possible that a vertex in $X$ has more than one ancestor. Also by abusing notation, we write $(\bfi, \bfj) \in {\mathcal E}_v$ to mean that $([\bfi], [\bfj]) \in {\mathcal E}_v$.\\
In order to describe the self-similar set $K$, we need more edges. Let
\[
{\mathcal E}_v^+ = \{ (x,y): \ |y| = |x| + 1,\ S_x(K) \cap S_y(K) \not= \emptyset, \ x \not=y^{-1} \};
\]
and let
\[
{\mathcal E}_h = \{ (x,y): \ |y| = |x| ,\ S_x(K) \cap S_y(K) \not= \emptyset \}.
\]
If $(x,y) \in {\mathcal E}_h$, such that $x^{-1} \not= y^{-1}$ for any $x^{-1}$ and any $y^{-1}$ (recall that $x^{-1}$ may not be unique), then we say $x$ and $y$ are {\it conjugates}. We call an edge in ${\mathcal E}_v \cup {\mathcal E}_v^+$ a {\it vertical edge}, and an edge in ${\mathcal E}_h$ a {\it horizontal edge}. Let
\[
{\mathcal E} = {\mathcal E}_v \cup {\mathcal E}_h, \quad \mbox{and} \quad
{\mathcal E}^\diamond = {\mathcal E}_v \cup {\mathcal E}_v^+.
\]
The graph $(X, {\mathcal E})$ simulates Kaimanovich's ``augmented tree" \cite{[Ka]}.\\
\begin{theorem}\label{theorem-main-1}
The graphs $(X, {\mathcal E})$ and $(X, {\mathcal E}^\diamond)$ are hyperbolic provided that the self-similar set $K$ has positive Lebesgue measure or the IFS satisfies the weak separation condition.
\end{theorem}
The definition of the {\it weak separation condition} (WSC) (see Definition \ref{def-WSC}) was first proposed by Lau and Ngai \cite{[LN]} to study the multifractal structure of an IFS with overlaps, and was studied extensively by many authors (\cite{[Z]}, \cite{[LNR]}, \cite{[FL]}, \cite{[Lau-Wang-2004]} and references therein). The WSC is an important condition in the study of IFS with overlap.
\begin{theorem}\label{theorem-main-2}
With the same assumptions as in Theorem \ref{theorem-main-1}, then the self-similar set $K$ is homeomorphic to the hyperbolic boundaries of $(X, {\mathcal E})$ and $(X, {\mathcal E}^\diamond)$. Furthermore, the H\"{o}lder equivalence holds if we assume additional conditions on the IFS (condition (H) in Section 4).
\end{theorem}
Recall that an IFS satisfies the {\it open set condition} (OSC), if there exists a bounded nonempty open set $O \subset {\mathbb R}^d$ such that $\cup_{i=1}^N S_i(O) \subset O$ with the union disjoint. It is well known that the OSC implies the WSC, and hence the above Theorems \ref{theorem-main-1} and \ref{theorem-main-2} extend the results in \cite{[Lau-Wang-2009]} where the IFS satisfies the OSC.
We organize the paper as follows. In Section 2, we recall some basic notations and definitions of a hyperbolic graph and a hyperbolic boundary. In Section 3, we study the properties of the graphs induced by an iterated function system, and prove criteria for the graphs $(X, {\mathcal E})$ and $(X, {\mathcal E}^\diamond)$ to be hyperbolic graphs. We prove Theorem \ref{theorem-main-1} in Section 4. In Section 5, we will prove Theorem \ref{theorem-main-2}, and show an example where both the condition (H) and the H\"{o}lder equivalence do not hold. Some open questions are given at the end of the paper.\\
\section{Hyperbolic Graphs and Hyperbolic Boundaries}
Let $G$ be a countably infinite set, and ${\mathcal G} \subset G^2$. We say that $(G, {\mathcal G})$ (or simply $G$) is a {\it graph} if ${\mathcal G}$ does not have loops and is symmetric, i.e., $(x,x) \not\in {\mathcal G}$ for all $x\in G$, and $(x,y)\in {\mathcal G}$ implies that $(y,x) \in {\mathcal G}$. We identify $(x,y)$ and $(y,x)$ and call it an {\it edge}. To visualize the graph $(G, {\mathcal G})$, we draw a segment $[x,y]$ if $(x,y) \in {\mathcal G}$. A finite {\it path} $p[x,y]$ from $x$ to $y$ is a sequence $[x_0, x_1, \cdots, x_n]$ with $(x_{i-1}, x_{i}) \in {\mathcal G}$ and $x=x_0, y=x_n$; we use $|p[x,y]| (=n)$ to denote the {\it length} of the path. Throughout the paper, we assume that the graph is {\it connected}, i.e., for any two different vertices $x,y\in G$, there is a path between them. A graph carries an integer-valued metric $d(x,y)$, which is the minimal length of all paths from $x$ to $y$. If a path $p[x,y]$ has the minimal length, we say that the path is a {\it geodesic} segment and denote the path by $\pi[x,y]$. For $x\in G$, we call ${\rm deg}(x) = \#\{y\in G:\ (x,y)\in {\mathcal G}\}$ the {\it degree} of $x$. We say a graph is {\it locally finite} if there exists a constant $c>0$ such that $\max\{{\rm deg}(x):\ x\in G\} \le c$. We fix a reference point $o \in G$ and call it the {\it root}. Denote $|x|= d(o,x)$; if $|x|=n$, we say $x$ is on the $n$-th {\it level}. If $|x| < |y|$, we say that $x$ is on the {\it upper level } of $y$, or $y$ is on the {\it lower level} of $x$.
Recall that the {\it Gromov product} of two vertices $x,y\in G$ is defined by
\begin{equation}\label{eq-Gromov-def}
|x \wedge y| = \frac{1}{2}(|x| + |y| - d(x,y)).
\end{equation}
\begin{defn} We say a graph $(G, {\mathcal G})$ is $\delta$-hyperbolic (with respect to the root $o$) if there exists a constant $\delta >0$ such that
\begin{equation}\label{def-hyperbolic}
|x \wedge y| \ge \min\{|x \wedge z|, \ |z \wedge y|\} - \delta, \quad \forall x, y, z \in G.
\end{equation}
\end{defn}
As in \cite{[Woess]}, we choose $a>0$ such that $ a' = e^{\delta a}-1
< \sqrt 2 -1$, where $\delta$ is as in (\ref{def-hyperbolic}). Define for $x,y \in G$,
\begin{equation} \label {eq1.1}
\rho_a(x,y) =
\begin{cases} \exp(-a|x\wedge y|), & x \not = y, \\0, \qquad & x=y .
\end{cases}
\end{equation}
Then
\begin{equation} \label {eq1.2}
\rho_a (x,y) \leq (1+a') \max \{ \rho_a(x,z), \rho_a(y,z)\},
\quad \forall \ x,y,z \in G.
\end{equation}
This means $\rho_a (\cdot , \cdot)$ is an {\it ultra-metric}. It is
not a metric, but is equivalent to the following metric:
$$
\theta_a (x,y) = \inf \{\sum_{i=1}^n \rho_a (x_{i-1},x_i): \ n\geq
1, \ x = x_0, x_1, \dots, x_n =y ,\ x_i\in G\},
$$
in the sense that $(1-2 a') \rho_a \le \theta_a \le \rho_a$ (see \cite[Proposition 22.8]{[Woess]}).
Since $\rho_a$ and $\theta_a$ define the same topology, in our
consideration we will use $\rho_a$ instead of $\theta_a$ for
simplicity. It is known that for any sequence $\{x_n\}_{n=1}^\infty$
such that $\lim_{n\to \infty} |x_n |= \infty$,
\medskip
{\it $\{x_n\}$ is Cauchy in the ultra-metric $\rho_a(x, y)$ if and
only if \ $\lim_{m, n\to \infty} |x_m\wedge x_n| = \infty$.}
\medskip
\begin {defn} Let $\widehat G$ denote the completion of the graph
$G$ under $\rho_a$. We call $\partial G = \widehat G \setminus G$
the {\rm hyperbolic boundary} of $G$.
\end{defn}
\medskip
The hyperbolic boundary $\partial G$ is a compact set. It is often
useful to identify $\xi \in \partial G$ with the {\it geodesic rays} in $G$ that converge to $\xi$. (By a geodesic ray, we mean an infinite path $ \pi[x_0, x_1, x_2, \cdots]$ such that $(x_i, x_{i+1}) \in {\mathcal G} \ (i=0, 1, \cdots)$, starting from the root $o$ and with any finite segment of the path being a geodesic). Note that two geodesic rays $\xi = \pi[x_0, x_1, x_2, \cdots ]$ and $\eta = \pi[y_0, y_1, y_2, \cdots ]$ are equivalent as Cauchy sequences in the ultra-metric $\rho_a$ if and only if
\begin{equation} \label{eq2.31}
d(x_n, y_n) \leq c\delta
\end{equation}
for all but finitely many $n$, where $c>0$ is independent of the
rays \cite{[Woess]}.\\
Let $\pi[x_0, x_1, x_2, \cdots]$ be a geodesic ray and $y\in G$. For each $n$, there is a geodesic $\pi[y, z_1, \cdots, z_k, x_n]$ connecting $y$ and $x_n$. Note that $p[y, z_1, \cdots, z_k, x_n, x_{n+1}]$ is a path from $y$ to $x_{n+1}$. It follows that $d(y, x_{n+1}) \le d(y, x_n)+1$; since $|x_{n+1}| = |x_n|+1$, this implies that $\{|y \wedge x_n|\}_{n=1}^\infty$ is a non-decreasing sequence bounded above by $|y|$, and hence $\lim_{n\to\infty} |y \wedge x_n|< \infty$ exists. Similarly, if $\pi[y_0, y_1, y_2, \cdots]$ is another geodesic ray, then $\lim_{n\to\infty} |x_n \wedge y_n|$ exists and is finite. We extend the Gromov product and ultra-metric to $\partial G$:
\begin{equation}\label{eq-extend-Gromov}
|\xi \wedge \eta| = \inf \{\lim_{n\to\infty} |x_n \wedge y_n |:\ x_n, y_n \in X, \ x_n\longrightarrow \xi,\ y_n \longrightarrow \eta\},
\end{equation}
where the infimum is taken over all geodesic rays $\pi[x_0, x_1, x_2, \cdots]$ and $\pi[y_0, y_1, y_2, \cdots]$ converging to $\xi$ and $\eta$ respectively.
Note that the value of $|x \wedge y|$ has the form $\frac{m}{2}$ (where $m$ is an integer), and hence the infimum is reached by some geodesic rays. Let $\pi[x_0, x_1,x_2, \cdots]$ and $\pi[y_0, y_1, y_2, \cdots]$ be geodesic rays which attain the infimum in (\ref{eq-extend-Gromov}). Let $\pi[z_0, z_1, z_2, \cdots]$ be another geodesic ray which converges to $\gamma\in \partial G$. By (\ref{def-hyperbolic}), we have
\[
|x_n \wedge y_n| \ge \min\{|x_n \wedge z_n|, \ |z_n \wedge y_n|\} - \delta.
\]
Letting $n\to\infty$ and taking the limit, we have
\[
|\xi \wedge \eta| \ge \lim_{n\to\infty} \min\{|x_n \wedge z_n|, \ |z_n \wedge y_n|\} - \delta \ge \min\{| \xi \wedge \gamma|, \ |\gamma \wedge \eta|\} - \delta.
\]
It follows that
\[
\rho_a(\xi, \eta) \le (1+a') \max\{\rho_a(\xi, \gamma),\ \rho_a(\gamma, \eta)\},
\]
where $a'=e^{a \delta} -1$. We see that the extension of $\rho_a$ is still an ultra-metric on $\partial G$ as in (\ref{eq1.2}).\\
On the other hand, if geodesic rays $\pi[x_1', x_2', \cdots]$ and $\pi[y_1', y_2', \cdots]$ converge to the above $\xi$ and $\eta$ respectively, then $\pi[x_1, x_2, \cdots]$ is equivalent to $\pi[x_1', x_2', \cdots]$, and $\pi[y_1, y_2, \cdots]$ is equivalent to $\pi[y_1', y_2', \cdots]$. By (\ref{eq2.31}), we have
\[
d(x_n,x_n') \le c \delta, \quad d(y_n, y_n') \le c \delta.
\]
It follows that
\[
\begin{aligned}
\Big{|} |x_n \wedge y_n| - |x_n' \wedge y_n'| \Big{|} & = \frac{1}{2}|d(x_n, y_n) - d(x_n', y_n')| \\
& \le \frac{1}{2} \Big{(} |d(x_n, y_n) - d(x_n, y_n')| + |d(x_n, y_n') - d(x_n', y_n')| \Big{)} \\
& \le \frac{1}{2} \left( d(y_n, y_n') + d(x_n, x_n') \right) \le c \delta.
\end{aligned}
\]
Hence
\begin{equation}\label{eq-ultra-ineq}
\rho_a(\xi,\eta) e^{-a c \delta} \le \lim_{n\to\infty} \rho_a(x_n', y_n') \le \rho_a(\xi, \eta).
\end{equation}
(The last inequality holds, because $\{x_n\}, \ \{y_n\}$ attain the minimum in (\ref{eq-extend-Gromov})).
This inequality will be used in section 5.\\
\bigskip
\section{Induced Graphs by IFS}
Let $\{S_j\}_{j=1}^N$ be an IFS of similitudes on ${\mathbb R}^d$. We use the notation defined in Section 1 where we defined two graphs $(X, {\mathcal E})$ and $(X, {\mathcal E}^\diamond)$. Let $d(x,y)$ and $d^\diamond(x,y)$ be the graph metrics on $(X, {\mathcal E})$ and $(X, {\mathcal E}^\diamond)$ respectively. We select the empty word $o$ as the root of the graphs, then for any $\bfi \in \Sigma^*$, $|\bfi| = d(o, \bfi) = d^\diamond (o, \bfi)$ (recall that we abuse the notation $\bfi \in X$ for $[\bfi] \in X$).
If the IFS satisfies the OSC, then the graph $(X, {\mathcal E}_v)$ is a {\it tree} (For any $x\in X$, there exists a unique path from the root to $x$), and this case was studied in \cite{[Lau-Wang-2009]}. If the OSC does not hold, it is possible that $S_\bfi = S_\bfj$ for different $\bfi, \bfj \in \Sigma^*$. Hence there are different paths from the root to vertex $[\bfi] = [\bfj] \in X$.
\bigskip
\noindent
{\bf Example 1.}
Let $S_i(x)= \frac{1}{2} (x + i), \ x \in{\mathbb R}, \ i=0,1,2$ be an IFS, the self-similar set is $K=[0,2]$. ${\mathcal J}_2/ \sim = \big\{\{00\}, \{01\}, \{02,10\},\{11\},\{12,20\},\{21\},\{22\}\big\}$.\\
\indent The vertex $\{02, 10\}$ has two ancestors $\{0\}$ and $\{1\}$. $(0, 02), (1,02) \in {\mathcal E}_v$ (abusing the notation).
\begin{center}
\begin{figure}[ht]\label{fig-e}
\centerline{\includegraphics[width=15cm,height=4cm]{Example1-1.eps}}
\caption{\small{Example 1, $(a)$ the iteration; $(b)$ the graph $(X,{\mathcal E}^\diamond)$; $(c)$ the graph $(X,{\mathcal E})$. The solid lines in $(b), (c)$ are edges in ${\mathcal E}_v$; the doted lines in $(b), (c)$ are edges in ${\mathcal E}_v^+$, and ${\mathcal E}_h$ respectively.}}
\end{figure}
\end{center}
In the graph $(X, {\mathcal E}^\diamond)$, there are eight edges connecting $1$: one of them connects the root $o$; three of them connect the descendents ($[10]=\{10, 02\}, [11]=\{11\}, [12]=\{12, 20\}$), and the others belong to ${\mathcal E}_v^+$ (see Figure 1 $(b)$).
In the graph $(X,{\mathcal E})$, for $n\ge 2$, each ``boundary vertex" ($0^n$ and $2^n$) has two horizontal neighbors, each ``near boundary vertex" ($0^{n-1}1$ and $2^{n-1}1$) has three horizontal neighbors, and the other vertices have four horizontal neighbors (see Figure 1 $(c)$). {\hfill$\Box$}\\
\medskip
For the graph $(X, {\mathcal E})$, a geodesic $\pi[x,y]$ connecting $x$ and $y$ is called {\it canonical} if $\pi[x,y] = \pi[x,u] \cup \pi[u,v] \cup \pi[v,y]$ (one or two parts may vanish) with $\pi[u,v]$ a horizontal path and $\pi[x,u], \ \pi[v,y]$ vertical paths; Moreover for any geodesic path $\pi'[x,y]$, $d(o, \pi[x,y]) \le d(o, \pi'(x,y))$. By the definition of Gromov product (\ref{eq-Gromov-def}), we have
\begin{equation}\label{eq-graph2-Gromov}
|x \wedge y| = h - \frac{\ell}{2},
\end{equation}
where $h$ and $\ell$ are the level and the length of the horizontal segment $\pi[u,v]$ respectively.\\
Following \cite{[Ka]}, we can use the following moves repeatedly to change the geodesic without increasing the length: for $u, v \in \pi[x,y]$, \ $|u|=|v|$,
\[
[u, v, v^{-1}] \to [u, u^{-1}, v^{-1}] \quad \mbox{and} \quad [u^{-1}, u, v] \to [u^{-1}, v^{-1}, v].
\]
By using this, we get a canonical geodesic. We should note that for a geodesic segment in ${\mathcal E}$, it cannot contain a sub-segment $[u,v,w]$ with $|u| =|w| = |v| - 1$, since in this case $(u,v), (v,w) \in {\mathcal E}_v$, which implies that $ S_v(K) \subset S_u(K) \cap S_w(K)$, it follows that $S_u(K) \cap S_w(K) \not= \emptyset$, and hence $(u,w) \in {\mathcal E}_h$ and $d(u,w) = 1$. This contradicts that $[u,v,w]$ is a geodesic segment. \\
\bigskip
Analogous to \cite[Theorem 2.3]{[Lau-Wang-2009]}, we have the following criterion for the graph $(X, {\mathcal E})$ to be hyperbolic. \\
\begin{theorem}\label{theorem-graph-II-hyperbloic}
The graph $(X, {\mathcal E})$ is hyperbolic if and only if there is a constant $L>0$ such that the length of any horizontal geodesic is bounded by $L$.
\end{theorem}
\begin{pf}
The proof of \cite[Theorem 2.3]{[Lau-Wang-2009]} works here. We give another proof for the necessary part only.
For any horizontal geodesic $\pi[x,y]$ connecting $x, y \in X$, without loss of generality, we assume that the length of $\pi[x,y]$ is an even number, say $2 k$. Let $z$ be the mid-point of $\pi[x,y]$. Then
\[
|x \wedge y| = |x| - k, \ \quad |x \wedge z| = |z \wedge y| = |x| - \frac{k}{2}.
\]
By (\ref{def-hyperbolic}), we have
\[
|x| - k \ge |x| - \frac{k}{2} - \delta, \ \quad {\rm i.e., } \quad k \le 2 \delta.
\]
\end{pf}
\bigskip
Now we study the graph $(X, {\mathcal E}^\diamond)$. Observe that if $p[u,v,w] \ (u\not=w)$ is a path in ${\mathcal E}^\diamond$ such that $|u| = |w| = |v| -1$, ($v$ in the lower level of $u$ and $w$) then $(u, v), \ (v,w) \in {\mathcal E}^\diamond$, and hence $S_u(K) \cap S_v(K) \not= \emptyset$ and $S_v(K) \cap S_w(K) \not= \emptyset$. Let $v' = v^{-2}$ (this $v'$ may not be unique), then it is clear that $S_v(K) \subset S_{v'}(K)$. Thus $S_u(K) \cap S_{v'}(K) \not= \emptyset$ and $S_{v'}(K) \cap S_w(K) \not= \emptyset$, it follows that $(u,v'), \ (v', w) \in {\mathcal E}^\diamond$, i.e., $p[u,v',w]$ is also a path. We see that $p[u,v,w,v',u]$ is a closed path with $u, w$ in the same level and $v,\ v'$ in the lower and upper level respectively. The closed path $p[u,v,w,v',u]$ looks like a ``diamond".
\begin{defn} A graph $(G, {\mathcal G})$ is called a {\rm diamond} graph (or simply diamond) if \\
\indent \hspace{-0.45cm} (i) $(x,y) \not\in {\mathcal G}$ for any $x, y \in G$ with $|x| = |y|$;\\
(ii) For any path $p[u,v,w]$ with $|u| = |w| = |v| -1$, $(u \not= w)$, there exists $v', |v'| = |u|-1$
such that $p[u,v',w]$ is also a path.\\
\end{defn}
We have shown that
\begin{coro}\label{coro-IFS-diamond}
The graph $(X, {\mathcal E}^\diamond)$ defined in Section 1 is a diamond graph.
\end{coro}
For a diamond graph $(G, {\mathcal G})$, if $[u,v,w]$ is a geodesic segment, then $|v| \not= |u|$ and $|v| \not= |w|$. Hence there are three possible cases: (a) $|u|=|v|+1 = |w|+2$ (or $|w|=|v|+1=|u|+2$); (b) $|u|=|w| = |v|+1$; or (c) $|u|=|w| = |v|-1$. For the last case, we use the move $[u,v,w] \to [u,v',w]$, where $v'$ is as in the above definition. By repeating this move, we see that for any $x, y \in G$ there is a {\it canonical} geodesic $x= x_0, x_1, \cdots, x_n=y$ such that $|x_i| = |x_{i+1}| +1\ (i < k)$ and $|x_i| = |x_{i+1}| - 1 \ (i \ge k)$ for some $k$, and we say that $x_k$ is on the {\it top level} of the canonical geodesic.
As a direct consequence of this, we see that $d(x,y)$ is an even number for all $x,y\in G$ with $|x|=|y|$.\\
For a diamond graph, if $x, y \in G$, then there is a canonical geodesic from $x$ to $y$. We assume that $z$ is in the geodesic segment and is on the top level. Then it is clear that $|x \wedge y| =|z|$. \\
\begin{theorem}\label{th-hyperbolic-I}
A diamond graph $(G, {\mathcal G})$ is hyperbolic if and only if there exists some constant $\delta'>0$ such that for any $z\in G$ and any two geodesic paths $\pi[o, x_1, \cdots, x_n, z]$ and $\pi[o, y_1, \cdots, y_n, z]$ from the root $o$ to $z$, we have $d(x_i, y_i) \le \delta',\ i=1, 2, \cdots, n$.
\end{theorem}
\begin{pf}
{\it Necessity:}
If otherwise, then for any integer $k>0$, there exists $z\in G$ and two geodesic paths from the root $o$ to $z$: $o\to x \to z$ and $o \to y \to z$, $|x|= |y|$ with $d(x,y) = 2 k$. Let $x= x_1, \cdots, x_k, x^*, x_{k+1}, \cdots, x_{2 k} = y$ be the canonical geodesic joining $x$ and $y$. Then $|x \wedge y| = |x^*| =|x| - k$ and $|x \wedge z| = |z \wedge y| = |x| = |y|$. We see that
\[
|x \wedge y| = \min\{|x \wedge z|, \ |z \wedge y|\} -k.
\]
This contradicts the definition of a hyperbolic graph.\\
\medskip
\noindent
{\it Sufficiency:} We will prove that (\ref{def-hyperbolic}) holds for some constant $\delta>0$.\\
\indent
For this, we use canonical geodesics connecting them: $x \to w \to y$, \ $x \to u \to z$ and $z \to v \to y$, where $w, u$ and $v$ are on the top levels, then
\[
|x \wedge y| = |w|, \quad |x \wedge z| =|u|, \quad |z \wedge y|= |v|.
\]
\begin{center}
\begin{figure}[ht]\label{fig-1}
\centerline{\includegraphics[width=5cm,height=3.5cm]{1-1.eps}}
\caption{The canonical geodesics}
\end{figure}
\end{center}
Without loss of generality, we assume that $|u| \le |v|$. Then (\ref{def-hyperbolic}) is reduced to $|w| \ge |u| - \delta$. Let $u'$ be on the geodesic segment from $u$ to $z$ such that $|u'|=|v|$ (see Fig.~2). The length of the path from $x$ to $y$: $x \to u \to \ u' \to v \to y$ is $(|x|-|u|) + (|u'|-|u|) + d(u', v) + (|y|-|v|) = |x| + |y| - 2 |u| + d(u', v)$. On the other hand, the canonical geodesic from $x$ to $y$ has length $(|x|-|w|) + (|y|-|w|) = |x|+|y|-2|w|$, and the geodesic has the minimal length. Hence
\[
|x| + |y| - 2 |u| + d(u', v) \ge |x|+|y|-2|w|,
\]
and thus $|w| \ge |u| - \frac{1}{2} d(u', v)$. Now we consider the two geodesics from the root $o$ to $z$: $o \to u' \to z$ and $o \to v \to z$, and note that $|u'|=|v|$. Using the hypothesis, we have $d(u',v) \le \delta'$. It follows that $|w| \ge |u| - \frac{\delta'}{2}$. This completes the proof.
\end{pf}
\bigskip
To end this section, we prove the following lemma which will be used in the next section.
\begin{lemma} \label{lemma-3x}
Let $d$ and $d^\diamond$ be the graph metrics on $(X, {\mathcal E})$ and $(X,{\mathcal E}^\diamond)$ respectively. Then
\[
d^\diamond(x,y) \le d(x,y) + 1, \quad \forall x, y \in X.
\]
\end{lemma}
\begin{pf}
For any $x,y \in X$, assume that $x, x^{-1}, \cdots, x^{-n}, u_1, \cdots, u_\ell, y^{-m}, \cdots, y^{-1}, y$ is a canonical geodesic in ${\mathcal E}$, where $\{x^{-n}, u_1, \cdots, u_\ell, y^{-m}\}$ is the horizontal part, $\{x, x^{-1}, \cdots, x^{-n}\}$ and $\{y^{-m}, \cdots, y^{-1}, y\}$ are the vertical parts of the canonical geodesic. We consider the two possible cases: (a) $\ell = 2 k +1$ is an odd number; or (b) $\ell = 2 k$ is an even number. In the first case, we replace the horizontal part by $x^{-n}, u_1^{-1}, u_2^{-2}, \cdots, u_k^{-k}, u_{k+1}^{-(k+1)}, u_{k+2}^{-k}, \cdots, u_{2 k +1}^{-1}, y^{-m}$ (this is a path in ${\mathcal E}^\diamond$). Then we get a new path in ${\mathcal E}^\diamond$ with length $\le d(x,y)$.
In the case $\ell = 2 k$, we replace the horizontal part by $x^{-n}, u_1^{-1}, u_2^{-2}, \cdots, u_k^{-k}, u_{k}^{-(k+1)}, u_{k+1}^{-k}, \cdots, u_{2 k}^{-1}, y^{-m}$. We see that $d^\diamond(x,y) \le d(x,y)+1$ in both cases.
\end{pf}
\bigskip
\section{Hyperbolicity of the Graphs}
In this section, we first recall the definition of the {\it weak separation condition} for an IFS and its basic properties. The definition was first proposed by Lau and Ngai \cite{[LN]} to study the multifractal structure of an IFS with overlaps.
\begin{defn}\label{def-WSC}
We say that the IFS $\{S_j\}_{j=1}^N$ satisfies the {\rm weak separation condition} (WSC) if there exists some constant $\gamma>0$ and a compact subset $D \subset {\mathbb R}^d$ with non-empty interior and $\cup_{j=1}^N S_j(D) \subset D$, such that for any $n\ge 1$ and $x\in {\mathbb R}^d$
\[
\# \{ S \in {\mathcal A}_n:\ x \in S(D)\} \le \gamma,
\]
where ${\mathcal A}_n = \{S_\bfi: \ \bfi \in {\mathcal J}_n\}$.
\end{defn}
\begin{lemma}\label{lemma-WSC}
The IFS $\{S_j\}_{j=1}^N$ satisfies the WSC if and only if for any $b>0$, there exists a constant $\gamma (= \gamma(b))$ such that for any $n$ and $D \subset {\mathbb R}^d$ with $\diam(D) \le b r^n$,
\[
\#\{ x \in X:\ |x|=n,\ S_x(K) \cap D \not= \emptyset\} \le \gamma.
\]
\end{lemma}
This is a consequence of \cite[Proposition 2.1]{[Lau-Wang-2004]}.\\
\begin{theorem}
Assume that the IFS satisfies the weak separation condition. Then the induced graphs $(X, {\mathcal E})$ and $(X, {\mathcal E}^\diamond)$ are locally finite.
\end{theorem}
\begin{pf}
For any $x\in X$ with $|x|=n$, let $D= S_x(K)$. Then $\mbox{diam} D \le r^n \mbox{diam} K$. By the above Lemma, we have
\[
\#\{ y\in X: \ |y|= n-1, n \ \mbox{or } n+1,\ S_y(K) \cap D \not= \emptyset \} \le \gamma(\frac{1}{r}|K|) + \gamma(|K|) + \gamma(r|K|),
\]
where $|K| = \diam(K)$. That the graphs are locally finite follows from this.
\end{pf}
\bigskip
In the rest of this section, we will prove Theorem \ref{theorem-main-1}. For this, we study the graph $(X, {\mathcal E}^\diamond)$ first.\\
\begin{theorem} \label{theorem-WSC-Hyperbolic}
Suppose the IFS satisfies the WSC, or the self-similar set $K$ has positive Lebesgue measure. Then the graph $(X, {\mathcal E}^\diamond)$ is hyperbolic.
\end{theorem}
\begin{pf} By Corollary \ref{coro-IFS-diamond}, the graph is a diamond graph. We will make use of Theorem \ref{th-hyperbolic-I} to prove the assertion.
For any $z\in X$ and any two geodesics from the root $o$ to $z$: $o=x_0, x_1, \cdots, x_n = z$; \ $\ o= y_0, y_1, \cdots, y_n = z$. We will prove $d^\diamond(x_k, y_k) \le \delta', \ (k=1,2, \cdots, n)$ for some constant $\delta' >0$ independent of $z$ and the geodesics (where $d^\diamond$ is the graph distance on $(X, {\mathcal E}^\diamond)$). For any fixed $k$, let
\[
D = \bigcup_{i=k}^n \left( S_{x_i}(K) \cup S_{y_i}(K) \right).
\]
Note that $S_{x_{i-1}}(K) \cap S_{x_i}(K) \not= \emptyset$ and $S_{y_{i-1}}(K) \cap S_{y_i}(K) \not= \emptyset$. It follows that
\[
\begin{aligned}
\mbox{diam} (D) & \le \sum_{i=k}^n \left( \mbox{diam} S_{x_i}(K) + \mbox{diam} S_{y_i}(K) \right)\\
& \le \sum_{i=k}^n 2 r^i |K| < \frac{2 |K|}{1-r} r^k.
\end{aligned}
\]
Let
\[
\{ u_1, \cdots, u_\ell \} = \{ x_i^{-(i-k)}, \ y_i^{-(i-k)}:\ i = k, \ k+1, \cdots, n\}.
\]
Then for each $u_i$, we have $D \cap S_{u_i}(K) \not= \emptyset$, and there exists $\{z_1, \cdots, z_{\ell_0}\} \subset \{u_1, \cdots, u_\ell\}$ such that $x_k = z_1$, $y_k = z_{\ell_0}$ and $S_{z_i}(K) \cap S_{z_{i+1}}(K) \not= \emptyset$ $(i=1, \cdots, \ell_0-1)$. Furthermore, we assume that $\ell_0$ is minimal. We claim that $\ell_0$ is bounded by some constant.\\
Indeed, if the IFS satisfies the WSC, then by Lemma \ref{lemma-WSC}, we have $\ell_0 \le \gamma(=\gamma(\frac{2|K|}{1-r}))$.\\
Now let us consider the case ${\mathcal L}(K) >0$, where ${\mathcal L}(\cdot)$ is the Lebesgue measure on
${\mathbb R}^d$. To prove the above $\ell_0$ is bounded by some constant, we let $D' = \cup_{i=1}^{\ell_0} S_{z_i}(K)$. Note that $S_{z_i}(K) \cap D \not= \emptyset$, ${\rm diam}(S_{z_i}(K)) \le r^k {\rm diam}(K)$, $(i=1,2, \cdots, \ell_0)$. Hence
\[
{\rm diam}(D') \le {\rm diam}(D) + 2 r^k {\rm diam}(K) \le \left( \frac{2|K|}{1-r} + 2 |K| \right) r^k := c r^k.
\]
Since $\ell_0$ is minimal, each point in $D'$ is covered by at most two of the sets $S_{z_i}(K)$. Comparing the Lebesgue measure, we have
\[
r^{(k+1)d} \ell_0 {\mathcal L}(K) \le \sum_{i=1}^{\ell_0} {\mathcal L}(S_{z_i}(K)) \le 2 {\mathcal L}(D')
\le 2 B (c r^k)^d,
\]
where $B$ is the Lebesgue measure of the unit ball in ${\mathbb R}^d$. It follows that $\ell_0 \le \frac{2 B c^d}{r^d{\mathcal L}(K)}$.
This completes the proof of the claim.\\
By the claim, there is a path $x_k = z_1, z_2, \cdots, z_{\ell_0} = y_k$ in $(X, {\mathcal E})$, and hence $d(x_k, y_k) < \ell_0$ (recall that $d$ is the graph metric on $(X, {\mathcal E})$). By Lemma \ref{lemma-3x}, we have $d^\diamond(x_k, y_k) \le \ell_0$, which is bounded by some constant. The assertion follows from this and Theorem \ref{th-hyperbolic-I}.
\end{pf}
\bigskip
In order to prove that the graph $(X, {\mathcal E})$ is hyperbolic, we introduce the following definition.\\
\begin{defn} \label{def-quasi}
Metric space $(X_1, \rho_1)$ is said to be {\rm quasi-isometric} to $(X_2, \rho_2)$
if there exists a map (which is called a quasi-isometry)
$f : X_1 \rightarrow X_2$ and positive constants $\tilde{L}, C$ such that\\
(i) for any $x,y \in X_1$,
\begin{equation} \label{eq-quasi}
\tilde{L}^{-1}\rho_1(x,y) - C < \rho_2(f(x), f(y)) < \tilde{L} \rho_1(x, y) + C;
\end{equation}
(ii) for every $y\in X_2$ there exists $x \in X_1$ such that $\rho_2(y, f(x)) < C$.\\
\end{defn}
Now we can compare the graph metrics $d$ and $d^\diamond$ on $(X, {\mathcal E})$ and $(X, {\mathcal E}^\diamond)$. \\
\begin{theorem}\label{theorem-quasi}
Suppose that the IFS satisfies the WSC or the self-similar set $K$ has positive Lebesgue measure. Then the identity map from the graph $(X, {\mathcal E})$ to $(X, {\mathcal E}^\diamond)$ is a quasi-isometry with the constant $\tilde{L}=1$.
\end{theorem}
\begin{pf}
By Lemma \ref{lemma-3x}, we have $d^\diamond(x, y) \le d(x,y) + 1$.
For the inverse inequality, we assume that $x= x_n, \cdots, x_1, z, y_1, \cdots, y_m = y$ is a canonical geodesic in ${\mathcal E}^\diamond$ with $z$ being on the top level.
Let
\[
D= \left( \cup_{i=1}^n S_{x_i}(K) \right) \bigcup S_z(K) \bigcup \left( \cup_{i=1}^m S_{y_i}(K) \right).
\]
Then
\[
\mbox{diam} D \le \left(\sum_{i=1}^n r^i +1 + \sum_{i=1}^m r^i \right) r^{|z|} |K| < \frac{2 |K|}{1-r} r^{|z|}.
\]
Denote
\[
\{u_1, \cdots, u_\ell\} \subset \{x_n^{-n}, \cdots, x_1^{-1}, z, y_1^{-1}, \cdots, y_m^{-m} \}.
\]
Then $D \cap S_{u_i}(K) \not= \emptyset \ (i=1,\cdots, \ell)$, and there exist $\{z_1, \cdots, z_{\ell_0} \} \subset \{u_1, \cdots, u_\ell\}$ such that $z_1 = x_n^{-n}$, $z_{\ell_0} = y_m^{-m}$, and $(z_i, z_{i+1}) \in {\mathcal E}^h$, $(i=1, \cdots, \ell_0 - 1)$. Furthermore, we assume that $\ell_0$ is minimal. Then a similar argument as in the proof of Theorem \ref{theorem-WSC-Hyperbolic} shows that the above $\ell_0$ is bounded by some constant $C>0$. We see that $\pi[x_n, x_n^{-1}, \cdots, x_n^{-n}] \cup \pi[z_1, \cdots, z_{\ell_0}] \cup \pi[y_m^{-m}, \cdots, y_m^{-1}, y_m]$ is a path from $x= x_n$ to $y = y_m$ in $(X, {\mathcal E})$. Hence
\[
d(x,y) \le n + m + C = d^\diamond(x,y) + C.
\]
This completes the proof.
\end{pf}
\bigskip
Denote by $|x \wedge y|$ and $|x \wedge y|^\diamond$ the Gromov product on $(X, {\mathcal E})$ and $(X, {\mathcal E}^\diamond)$ respectively.
As a direct consequence of Theorems \ref{theorem-WSC-Hyperbolic} and \ref{theorem-quasi}, we have\\
\begin{theorem}
The graph $(X, {\mathcal E})$ is hyperbolic provided that the corresponding IFS satisfies the WSC or the self-similar set $K$ has positive Lebesgue measure.
\end{theorem}
\begin{pf}
Observing that
\[
|x \wedge y| = \frac{1}{2}\big{(} |x| + |y| - d(x,y) \big{)}, \quad \mbox{and} \quad |x \wedge y|^\diamond = \frac{1}{2}\big{(} |x| + |y| - d^\diamond(x,y) \big{)}.
\]
It follows that
\begin{equation}\label{eq-deff-Gromov}
\Big{|} |x \wedge y| - |x \wedge y|^\diamond \Big{|} = \frac{1}{2} \Big{|} d(x,y) - d^\diamond(x,y) \Big{|} \le \frac{C}{2},
\end{equation}
where the constant $C>0$ is as in (\ref{eq-quasi}). \\
Note that $(X, {\mathcal E}^\diamond)$ is hyperbolic. Hence there exists a constant $\delta>0$ such that
\[
|x \wedge y|^\diamond \ge \min\{|x \wedge z|^\diamond, \ |z \wedge y|^\diamond\} - \delta, \quad \forall x,y,z \in X.
\]
Thus
\[
\begin{aligned}
|x \wedge y| & \ge |x \wedge y|^\diamond - \frac{C}{2} \ge \min\{|x \wedge z|^\diamond, |z \wedge y|^\diamond\} - \delta - \frac{C}{2}\\
& \ge \min\{|x \wedge z|, |z \wedge y|\} - (\delta + C).
\end{aligned}
\]
This completes the proof.
\end{pf}
\bigskip
\begin{remark} \label{remark-section4}
In Theorems \ref{theorem-WSC-Hyperbolic} and \ref{theorem-quasi}, we assume that the IFS satisfies the WSC or that the self-similar set has positive Lebesgue measure; either assumption implies the following condition:\\
\noindent {\rm
(C) For any $a>0$, there exists a constant $C>0$ such that for any integer $n > 0$, any $D \subset {\mathbb R}^d$ with $|D| \le a r^n$, and any subset $\{u_1, \cdots, u_\ell\} \subset {\mathcal J}_n$ with $S_{u_i}(K) \cap D \not=\emptyset$ and $S_{u_i}(K) \cap S_{u_{i+1}}(K) \not= \emptyset$, there exists a subset $\{z_1, \cdots, z_{\ell_0}\} \subset \{u_1, \cdots, u_\ell\}$ such that $z_1 = u_1$, $z_{\ell_0} = u_\ell$, $S_{z_i}(K) \cap S_{z_{i+1}}(K) \not= \emptyset$ and $\ell_0 \le C$.\\
}
\indent
From the proof of Theorem \ref{theorem-WSC-Hyperbolic} and \ref{theorem-quasi}, we see that the above condition (C) implies both $(X, {\mathcal E})$ and $(X, {\mathcal E}^\diamond)$ are hyperbolic.
\end{remark}
\bigskip
\section{Hyperbolic Boundaries}
Throughout this section, we assume that the IFS satisfies the WSC or the self-similar set has positive Lebesgue measure, and hence the induced graphs $(X, {\mathcal E})$ and $(X, {\mathcal E}^\diamond)$ are hyperbolic. Denote by $\partial X$ and $\partial X^\diamond$ the hyperbolic boundaries, and by $\rho_a, \ \rho_a^\diamond$ the corresponding hyperbolic metrics, respectively.
It is known that if $f$ is a quasi-isometry from a hyperbolic graph $(X_1, d_1)$ to $(X_2, d_2)$, then $\{x_n\}_n$ is a Cauchy sequence in $X_1$ under the ultra-metric if and only if $\{f(x_n)\}_n$ is. Moreover $\partial X_1$ and $\partial X_2$ are homeomorphic (see \cite{[CDP]}). In our case, we have the following stronger form. \\
\begin{prop}\label{theorem-equi-boundarys}
The hyperbolic boundaries $ \partial X = \partial X^\diamond$, and the hyperbolic metrics $\rho_a$ and $\rho_a^\diamond$ are equivalent, i.e., there exists a constant $C>0$ such that
\begin{equation}\label{eq-equi-boundary}
C^{-1} \rho_{a}( \xi, \eta) \le \rho_{a}^\diamond(\xi, \eta) \le C \rho_{a}(\xi, \eta), \quad \forall \xi, \eta \in \partial X.
\end{equation}
\end{prop}
\begin{pf}
Recall that a sequence $\{x_n\}_n \subset X$ with $|x_n| \to \infty$ is a Cauchy sequence under the ultra-metric $\rho_a$ if and only if $\lim_{m,n \to \infty} |x_m \wedge x_n| = \infty$, and a Cauchy sequence $\{y_n\}_n$ with $|x_n|=|y_n|$ is equivalent to $\{x_n\}_n$ if and only if $d(x_n, y_n) \le c \delta$ for all but finitely many $n$.
By (\ref{eq-deff-Gromov}), a sequence $\{x_n\}_n \subset X$ with $|x_n| \to \infty$ is a Cauchy sequence under the ultra-metric $\rho_{a}$ if and only if it is Cauchy in $\rho_{a}^\diamond$; moreover, by Theorem \ref{theorem-quasi}, a Cauchy sequence $\{y_n\}_n$ is equivalent to $\{x_n\}_n$ in $\rho_{a}$ if and only if they are equivalent in $\rho_{a}^\diamond$. Recall that an element in the hyperbolic boundary $\partial X$ is an equivalence class of Cauchy sequences in the ultra-metric $\rho_a$. Hence an element $\xi \in \partial X$ if and only if $\xi \in \partial X^\diamond$, i.e., $\partial X = \partial X^\diamond$.
Now we prove (\ref{eq-equi-boundary}). For $\xi=[\{x_n\}_n], \ \eta=[\{y_n\}_n] \in \partial X = \partial X^\diamond$, ($\xi \not= \eta$), by (\ref{eq-deff-Gromov}), we have
\[
\Big{|} |x_n \wedge y_n | - |x_n \wedge y_n|^\diamond \Big{|} \le \overline{C}, \quad \forall n.
\]
It follows that $\rho_{a}(x_n, y_n) \le e^{a \overline{C}} \rho_{a}^\diamond(x_n, y_n)$. Letting $n\to\infty$ and making use of (\ref{eq-ultra-ineq}), we have $\rho_{a}(\xi, \eta) \le C \rho_{a}^\diamond(\xi, \eta)$. The same argument implies the inverse inequality.
\end{pf}
\bigskip
To understand the topology of $(\partial X, \rho_{a})$ and $(\partial X^\diamond, \rho_{a}^\diamond)$, by the above Proposition, we need only to consider one of them. In the following, we consider $(\partial X, \rho_a)$. The arguments in \cite{[Lau-Wang-2009]} are adopted here.\\
\begin{lemma}\label{lemma-ray-II}
$\pi[u_0, u_1, \cdots]$ is a geodesic ray in the graph $(X, {\mathcal E})$ if and only if there exist $\bfi = i_1 i_2 \cdots \in \Sigma^\infty$ such that $u_n =[\bfi|_n]$ for all $n \ge 0$, where $\bfi|_n \in {\mathcal J}_n$ is the initial part of $\bfi$.
\end{lemma}
\begin{pf}
Clearly, for any $\bfi \in \Sigma^\infty$, $\pi[\bfi|_0, \bfi|_1, \cdots]$ is a geodesic ray in $(X, {\mathcal E})$ (where $\pi[\bfi|_0, \bfi|_1, \cdots]$ is an abuse of notation for $\pi[[\bfi|_0], [\bfi|_1], \cdots]$). \\
Conversely, assume that $\pi[u_0, u_1, \cdots]$ is a geodesic ray in the graph $(X, {\mathcal E})$. Then for each $i\ge 0$, $(u_i, u_{i+1}) \in {\mathcal E}_0$. We use induction to construct $\bfi \in \Sigma^\infty$ as follows:
Choose any $\bfi_1 = i_1 i_2 \cdots i_k \in u_1$. If we have selected $\bfi_m = i_1 i_2 \cdots i_n \in u_\ell$, note that $(u_\ell, u_{\ell + 1}) \in {\mathcal E}_v$; by the definition of ${\mathcal E}_v$, we know that there are some $i_{n+1}, \cdots, i_{n+k'} \in \Sigma$ such that $i_1 \cdots i_n i_{n+1} \cdots i_{n+k'} \in u_{\ell + 1}$. Eventually we obtain the index $\bfi = i_1 i_2 \cdots \in \Sigma^\infty$ such that $\bfi|_n \in u_n$.
\end{pf}
\begin{lemma} \label{lemma-equi-ray}
Let $\xi = \pi[u_0, u_1, \cdots]$ be a geodesic ray in $(X, {\mathcal E})$. Then the limit $\lim_{n\to \infty} S_{u_n}(x)$ exists and is independent of $x\in {\mathbb R}^d$. Moreover, if a geodesic ray $\eta=\pi[v_0, v_1, \cdots]$ is equivalent to $\xi$, then $\lim_{n\to\infty} S_{v_n}(x) = \lim_{n\to\infty} S_{u_n}(x)$.
\end{lemma}
\begin{pf}
By Lemma \ref{lemma-ray-II}, there exists $\bfi \in \Sigma^\infty$ such that $u_n = [\bfi|_n]$, $(n=0,1, \cdots)$. It is well known that the limit $\lim_{n\to\infty} S_{\bfi|_n}(x) = \lim_{n\to\infty} S_{u_n}(x)$ exists and is independent of $x \in {\mathbb R}^d$, and the first part of the lemma follows.
For the second part, note that $\xi$ and $\eta$ are equivalent, and hence there exists some constant $c>0$ such that
$d(u_n, v_n) \le c$ for all $n \ge 0$. For each fixed $n$, there is a geodesic segment $\pi[w_1, \cdots, w_\ell]$ ($w_1 = u_n, \ w_\ell = v_n$ and $\ell \le c$) connecting $u_n$ and $v_n$. Note that $S_{w_i}(K) \cap S_{w_{i+1}}(K) \not= \emptyset$, taking any $x\in K$, we have the following estimate
\[
| S_{u_n}(x) - S_{v_n}(x)| \le \sum_{i=1}^{\ell} {\rm diam} S_{w_i}(K) \le \ell |K| r^{n- \ell} \le c |K| r^{n-c}.
\]
This implies that $\lim_{n\to\infty} S_{v_n}(x) = \lim_{n\to\infty} S_{u_n}(x)$.
\end{pf}
\bigskip
Let $\xi = \pi[u_0, u_1, \cdots]$ be a geodesic ray in $(X, {\mathcal E})$. We define
\[
\Phi(\xi) = \lim_{n\to\infty}S_{u_n}(x_0),
\]
where $x_0\in {\mathbb R}^d$. By using the above lemma, if a geodesic ray $\eta$ is equivalent to $\xi$, then $\Phi(\xi) = \Phi(\eta)$. Hence $\Phi$ induces a map (we still use $\Phi$ to denote this map) from the hyperbolic boundary $\partial X$ to the self-similar set $K$.\\
\begin{theorem} \label{theorem-homeomorphism}
The map $\Phi:\ \partial X \longrightarrow K$ is a bijection and there exists a constant $C>0$ such that
\begin{equation}\label{eq-holder-1}
|\Phi(\xi) - \Phi(\eta)| \le C \rho_{a}(\xi,\eta)^\alpha, \quad \forall \xi, \eta \in \partial X,
\end{equation}
where $\alpha= - \log r/a$. In particular $\partial X$ is homeomorphic to the self-similar set $K$.
\end{theorem}
\begin{pf}
Let $x_0 \in {\mathbb R}^d$. For any $x\in K$, there exists an index $\bfu=i_1 i_2 \cdots \in \Sigma^\infty$ such that $\lim_{n\to\infty} S_{i_1 i_2 \cdots i_n} (x_0) = x$. In particular, $\lim_{n\to\infty}S_{\bfu|_n}(x_0) = x$ (recall that $\bfu|_n = i_1 \cdots i_k$, where the integer $k$ is such that $r_1 r_2 \cdots r_k \le r^n < r_1 r_2 \cdots r_{k-1}$). This means that the image of the geodesic ray $\pi[\bfu|_0, \bfu|_1, \bfu|_2, \cdots]$ under the map $\Phi$ is $x\in K$. Hence the map is surjective. \\
To show that $\Phi$ is injective, assume that $\xi, \eta \in \partial X$. Then there are geodesic rays $\pi[x_0, x_1, \cdots]$ and $\pi[y_0, y_1, \cdots]$ converge to $\xi$ and $\eta$ respectively; moreover we assume that they attain the infimum in (\ref{eq-extend-Gromov}).
By Lemma \ref{lemma-ray-II}, there exist indexes $\bfu =i_1 i_2 \cdots , \ \bfv =j_1 j_2 \cdots \in \Sigma^\infty$ such that $x_n = \bfu|_n$ and $y_n = \bfv|_n, \ n=0, 1, \cdots$ (recall that we abuse the notation $x_n = \bfu|_n$ means that $ x_n = [\bfu|_n]$). If $\Phi(\xi) = \Phi(\eta) = x \in K$, then
\[
x \in S_{x_n}(K) \bigcap S_{y_n}(K), \ \quad n =0 ,1, 2, \cdots.
\]
Hence $(x_n, y_n) \in {\mathcal E}$. It follows that $d(x_n, y_n) \le 1$, we see that the geodesic rays $\pi[x_0, x_1, \cdots]$ and $\pi[y_0, y_1, \cdots]$ are equivalent, i.e., $\xi = \eta$. Hence the map $\Phi$ is injective. \\
Now we prove (\ref{eq-holder-1}). If the above $\xi \not= \eta$, then for any fixed $n$, there is a canonical geodesic $\pi[z_{0,n}, z_{1,n}, \cdots, z_{k_n, n}]$ ($z_{0,n}= x_n, \ z_{k_n, n} = y_n$) joining $x_n$ and $y_n$. Note that $x_{n+1}, z_{0,n} \cdots, z_{k_n, n}, y_{n+1}$ is a path (may not geodesic) from $x_{n+1}$ to $y_{n+1}$. Hence $d(x_{n+1}, y_{n+1}) \le d(x_n, y_n) + 2$. It follows that
\[
\begin{aligned}
| x_{n+1} \wedge y_{n+1} | & = \frac{1}{2} ( |x_{n+1}| + |y_{n+1}| - d(x_{n+1}, y_{n+1}) )\\
& \ge \frac{1}{2} ( |x_n| + |y_n| - d(x_{n}, y_{n}) ) = | x_n \wedge y_n |.
\end{aligned}
\]
i.e., $\{| x_n \wedge y_n | \}_{n=1}^\infty$ is an increasing sequence. On the other hand, $\xi \not= \eta$ implies that
\[
|\xi \wedge \eta | = \lim_{n\to\infty} | x_n \wedge y_n | = k \ < \infty.
\]
Note that $2 k$ is an integer. Hence there exists $m$ such that
\[
| x_n \wedge y_n | = k, \mbox{ if } n \ge m; \quad | x_n \wedge y_n | < k, \mbox{ if } n < m.
\]
To estimate $|\Phi(\xi) - \Phi(\eta)|$, we note that $\Phi(\xi) \in S_{x_{m}}(K)$ and $\Phi(\eta) \in S_{y_{m}}(K)$, hence there exist $x,y \in K$ such that $S_{x_{m}}(x)= \Phi(\xi)$ and $S_{y_{m}}(y) = \Phi(\eta)$. Recall that $\pi[z_{0,m}, \cdots, z_{k_m, m}]$ is a canonical geodesic joining $x_m$ and $y_m$.
Assume that $z_{i, m}, \cdots, z_{i', m}$, $(0 \le i \le i' \le k_m)$ is the horizontal part of the canonical geodesic, denote by $k', \ \ell(=i'-i)$ the level and length of this segment. Then by (\ref{eq-graph2-Gromov}), we have
\[
| x_m \wedge y_m | = k = k' + \frac{\ell}{2}.
\]
For the vertical parts, we have
\[
\Phi(\xi) = S_{x_m}(x) \in S_{x_m}(K) \subset S_{z_{i,m}}(K), \quad
\Phi(\eta) = S_{y_m}(y) \in S_{y_m}(K) \subset S_{z_{i',m}}(K).
\]
It follows that
\[
| \Phi(\xi) - \Phi(\eta)| \le {\rm diam} \Big{(} \bigcup_{j=i}^{i'} S_{z_{j,m}}(K) \Big{)}
\le \sum_{j=i}^{i'} {\rm diam} \Big{(} S_{z_{j,m}}(K) \Big{)}
\le (\ell+1) |K| r^{k'}
\]
By Theorem \ref{theorem-graph-II-hyperbloic}, $\ell$ is bounded by the constant $L>0$. Hence
\[
| \Phi(\xi) - \Phi(\eta)| \le (L+1) |K| r^{k- \frac{\ell}{2}} \le (L+1) r^{-\frac{L}{2}} |K| \rho_{a}(\xi, \eta)^\alpha,
\]
where $\alpha = -\log r/a$, and (\ref{eq-holder-1}) follows.
By (\ref{eq-holder-1}), we know that the map $\Phi$ is continuous, and hence is a homeomorphism, since $\partial X$ and $K$ are compact. We complete the proof.
\end{pf}
\bigskip
In order to get the inverse inequality of (\ref{eq-holder-1}), we need the following condition on IFS as \cite{[Lau-Wang-2009]}:\\
\noindent
{\it
(H) There exists a constant $C'>0$ such that for any integer $n>0$ and $\bfu, \bfv \in {\mathcal J}_n$, either \[
S_\bfu(K) \cap S_\bfv(K) \not= \emptyset \quad \mbox{ or } \quad |S_\bfu(x) - S_\bfv(y)| \ge C' r^n, \quad \forall x,y\in K.
\]
}
\bigskip
\begin{prop}\label{prop-holder}
Suppose the IFS $\{S_j\}_{j=1}^N$ in Theorem \ref{theorem-homeomorphism} satisfies in addition condition (H). Then there exists a constant $C>0$ such that for any $\xi, \eta \in \partial X$,
\begin{equation}\label{eq-holder-2}
C^{-1} | \Phi(\xi) - \Phi(\eta) | \le \rho_{a}(\xi, \eta)^\alpha \le C | \Phi(\xi) - \Phi(\eta) |,
\end{equation}
where $\alpha= -\log r /a$.
\end{prop}
\begin{pf}
For $\xi=\eta$ the inequality is trivial, in the following we assume that $\xi \not= \eta$ and use the notation in the proof of Theorem \ref{theorem-homeomorphism}. It is clear that $S_{x_{k' + 1}}(K) \cap S_{y_{k'+1}}(K) = \emptyset$, where $k'$ is the level of the horizontal part of the canonical geodesic connecting $\xi$ and $\eta$ as in proof of the above Theorem.
By condition (H), we have
\[
|\Phi(\xi) - \Phi(\eta)| \ge C' r^{k' + 1} = C' r^{1- \frac{\ell}{2}} r^k \ge C \rho_a(\xi,\eta)^\alpha
\]
for some constant $C > 0$. This is the second inequality of (\ref{eq-holder-2}). The first inequality is proved in Theorem \ref{theorem-homeomorphism}.
\end{pf}
\bigskip
The above theorem can be used to study the Lipschitz equivalence relation for self-similar sets which can be found in \cite{[LL]}. \\
The following example shows that the second inequality of (\ref{eq-holder-2}) does not hold if the condition (H) fails. \\
\noindent
{\bf Example 2:} Let $p_0=(0,0),\ p_1=(1,0)$ and $p_2=(\frac{1}{2}, \frac{\sqrt{3}}{2})$ be the vertices of an equilateral triangle $\Delta$ in ${\mathbb R}^2$, and let $S_i(x) = \frac{1}{3}(x+q_i), \ q_i \in {\mathbb R}^2, \ i= 0,1, \cdots, 4$ be five maps, each mapping the triangle $\Delta$ to a small triangle (see Fig.~3). This IFS satisfies the OSC with the interior of $\Delta$ as an open set. Denote by $K$ the self-similar set of the IFS $\{S_i\}_{i=0}^4$. Then $\{p_0, p_1, p_2\} \subset K \subset \Delta$.
Let $x_0$ be the horizontal coordinate of left-bottom of triangle $S_3(\Delta)$ (the first coordinate of $S_3(p_0)$), we choose $x_0$ such that
\[
\frac{1}{2} - x_0 = \sum_{k=1}^\infty 3^{-n_k}, \quad n_k = 1 + \frac{k(k+1)}{2}, \ k =1,2, \cdots.
\]
In this example $r=\frac{1}{3}, \ X = \Sigma^* = \cup_{n=0}^\infty \{0,1,2,3,4\}^n$.
We define a sequence $\{a_i\}_{i=1}^\infty$ in the symbolic space $\Sigma = \{0,1, \cdots, 4\}$ as: $a_i=1$ if $i=n_k$ for some $k$; otherwise $a_i=0$. By using this sequence, we prove that condition (H) does not hold.\\
\begin{center}
\begin{figure}[h] \label{fig4}
\centerline{\includegraphics[width=5cm,height=4cm]{4.eps}}
\caption{\small{The maps in Example 2. }}
\end{figure}
\end{center}
We iterate the IFS $n_k$ times and get a set of small triangles $\{S_\bfu (\Delta):\ |\bfu|= n_k\}$. Consider the following three of such triangles: $\Delta_1$ is the one on the top of $S_1(\Delta)$, $\Delta_2$ the unique one in $S_3(\Delta)$ which intersects $\Delta_1$, and $\Delta_3$ the one on the left of $\Delta_2$. The corresponding codes are $\bfu_k=1 4^{n_k-1}$, $\bfv_k = 3 a_2 a_3 \cdots a_{n_k}$ and $\bfw_k = 3 a_2 a_3 \cdots a_{n_k-1}0$, respectively. Let $x_k$ be the coordinate of the right-bottom of triangle $\Delta_3$, i.e., $x_k= S_{\bfw_k}(p_1)$, and let $y= S_{\bfu_k}(p_2)$. Then
\[
| y- x_k | = \min\{ |x-x'|:\ x \in \Delta_1,\ x'\in \Delta_3\} = \sum_{i=k+1}^\infty 3^{-n_i} = c_k \cdot 3^{-n_{k+1}},
\]
where $1< c_k < \frac{3}{2}$. We see that the condition (H) does not hold.\\
Consider the geodesic rays $\xi_k= \{\bfw_k 2^\infty|_n\}_n$ and $\eta= \{1 4^\infty|_n\}_n$. Then $\Phi(\xi_k) = x_k$ and $\Phi(\eta) = y$. On the other hand,
\[
|\xi_k \wedge \eta| = |\bfw_k| + 1 =n_k + 1,
\]
and $\rho_{a}(\xi_k, \eta)^\alpha = 3^{- n_k - 1}$. We see that the second inequality in (\ref{eq-holder-2}) does not hold. {\hfill$\Box$}
\bigskip
Another example which does not satisfy the condition (H) can be found in \cite{[Lau-Wang-2009]}.
\bigskip
To end this paper, we ask the following question:\\
{\bf Question 1:} Does local finiteness of the graph $(X, {\mathcal E})$ or $(X, {\mathcal E}^\diamond)$ imply that the IFS satisfies the WSC?\\
{\bf Question 2:} Are the conditions that the IFS satisfies the WSC or the self-similar set has positive Lebesgue measure necessary for the graphs to be hyperbolic?\\
\vspace{2ex} \noindent {\bf Acknowledgements.}~~ Part of this work
was carried out while the author was visiting the
Department of Mathematics of the Chinese University of Hong Kong.
He is grateful for the discussion with professors K.S. Lau and S.M. Ngai.
|
{
"timestamp": "2012-06-28T02:01:38",
"yymm": "1206",
"arxiv_id": "1206.6171",
"language": "en",
"url": "https://arxiv.org/abs/1206.6171"
}
|
\section*{References}
|
{
"timestamp": "2012-06-28T02:02:41",
"yymm": "1206",
"arxiv_id": "1206.5972",
"language": "es",
"url": "https://arxiv.org/abs/1206.5972"
}
|
\section{Introduction}
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a graded Lie
algebra over the f\/ield $\mathbb R$ of real numbers or
the f\/ield $\mathbb C$ of complex numbers, and let $\mu$ be a positive integer.
The graded Lie algebra $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p $ is
called a~fundamental graded Lie algebra if the following conditions hold:
$(i)$~$\mathfrak m$ is f\/inite-dimensional; $(ii)$~$\mathfrak g_{-1}\ne\{0\}$, and
$\mathfrak m$ is generated by $\mathfrak g_{-1}$.
Moreover a fundamental graded Lie algebra
$\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ is said to be
of the $\mu$-th kind if $\mathfrak g_{-\mu}\ne\{0\}$, and $\mathfrak g_p=\{0\}$ for all $p<-\mu$.
It is shown that every fundamental graded Lie algebra $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$
is prolonged to a graded Lie algebra $\gla{g (\mathfrak m)}$ satisfying the following conditions:
$(i)$~$\mathfrak g(\mathfrak m)_p=\mathfrak g_p$ for all $p<0$;
$(ii)$~for $X\in\mathfrak g(\mathfrak m)_p$ $(p\geqq0)$,
$[X,\mathfrak m]=\{0\}$ implies $X=0$;
$(iii)$~$\mathfrak g (\mathfrak m)$ is maximum among graded Lie algebras
satisfying conditions $(i)$ and $(ii)$ above.
The graded Lie algebra $\mathfrak g (\mathfrak m)$ is called the prolongation of
$\mathfrak m$.
Note that $\mathfrak g(\mathfrak m)_0$ is the Lie algebra of all the
derivations of $\mathfrak m$ as a graded Lie algebra.
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a fundamental graded Lie algebra of the $\mu$-th kind, where $\mu\geqq2$.
The fundamental graded Lie algebra
$\mathfrak m$ is called a free fundamental graded Lie algebra of type
$(n,\mu)$ if the following universal properties hold:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\roman{enumi})$}
\item $\dim \mathfrak g_{-1}=n$;
\item Let $\mathfrak m'=\bigoplus\limits_{p<0}\mathfrak g'_p$ be a fundamental
graded Lie algebra of the $\mu$-th kind and let $\varphi$ be a~surjective
linear mapping of $\mathfrak g_{-1}$ onto $\mathfrak g'_{-1}$.
Then $\varphi$ can be extended uniquely to a graded Lie algebra
epimorphism of $\mathfrak m$ onto $\mathfrak m'$.
\end{enumerate}
In Section~\ref{section3} we see that
a universal fundamental graded Lie algebra $b(V,\mu)$ of the $\mu$-th kind introduced
by N.~Tanaka~\cite{tan70:1} becomes
a free fundamental graded Lie algebra of type
$(n,\mu)$, where $\mu\geqq2$, and $V$ is a vector space such that
$\dim V=n\geqq2$.
In \cite{war07:1}, B.~Warhurst gave the complete list of the prolongations of
real free fundamental graded Lie algebras by using a Hall basis of a free Lie algebra.
The complex version of his theorem has exactly the same form
except for the ground number f\/ield as follows:
\begin{theoremI}\label{Yatsui-theoremI}
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a free
fundamental graded Lie algebra of type $(n,\mu)$ over $\mathbb C$.
Then
the prolongation $\gla{g (\mathfrak m)}$ of $\mathfrak m$ is
one of the following types:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\alph{enumi})$}
\item $(n,\mu)\ne(n,2)$ $(n\geqq2)$, $(2,3)$. In this case,
$\mathfrak g(\mathfrak m)_1=\{0\}$.
\item
$(n,\mu)=
(n,2)$ $(n\geqq3)$, $(2,3)$.
In this case, $\dim\mathfrak g(\mathfrak m)<\infty$ and
$\mathfrak g(\mathfrak m)_1\ne\{0\}$. Furthermore
$\mathfrak g(\mathfrak m)$ is isomorphic to a finite-dimensional simple graded
Lie algebra of type $(B_n,\{\alpha_n\})$ $(n\geqq3)$ or $(G_2,\{\alpha_1
\})$ $(n=2)$ $($see {\rm \cite{yam93:1}} or Section~{\rm \ref{section5}} for the gradations of
finite-dimensional simple graded Lie algebras over~$\mathbb C)$.
\item $(n,\mu)=(2,2)$.
In this case,
$\dim\mathfrak g(\mathfrak m)=\infty$. Furthermore,
$\mathfrak g(\mathfrak m)$ is isomorphic to the contact algebra
$K(1)$ as a graded Lie algebra.
\end{enumerate}
\end{theoremI}
The f\/irst purpose of this paper is to give a proof of Theorem~\ref{Yatsui-theoremI} by using the classif\/ication
of complex irreducible transitive graded Lie algebras of f\/inite depth
(cf.~\cite{mt70:1}).
Note that Warhurst's methods in \cite{war07:1} are available to the proof of Theorem~\ref{Yatsui-theoremI}.
Next we introduce the notion of free pseudo-product fundamental graded Lie
algebras.
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a fundamental graded Lie algebra, and let
$\mathfrak e$ and $\mathfrak f$ be nonzero subspaces of~$\mathfrak g_{-1}$.
Then $\mathfrak m$ is called a pseudo-product fundamental graded Lie algebra
with pseudo-product structure $(\mathfrak e,\mathfrak f)$
if the following conditions hold:
$(i)$ $\mathfrak g_{-1}=\mathfrak e\oplus\mathfrak f$;
$(ii)$ $[\mathfrak e,\mathfrak e]=[\mathfrak f,\mathfrak f]=\{0\}$ (cf.~\cite{tan85:01}).
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a pseudo-product
fundamental graded Lie algebra with a pseudo-product structure
$(\mathfrak e,\mathfrak f)$, and let
$\mathfrak g(\mathfrak m)=\bigoplus\limits_{p\in\mathbb Z}\mathfrak g(\mathfrak m)_p$ be the prolongation of $\mathfrak m$. Moreover let $\mathfrak g_0$ be the Lie algebra of all the derivations of
$\mathfrak m$ as a graded Lie algebra
preserving $\mathfrak e$ and $\mathfrak f$.
Also for $p\geqq1$ we set $\mathfrak g_p=\{X\in\mathfrak g(\mathfrak m)_p:
[X,\mathfrak g_k]\subset \mathfrak g_{p+k}\ \text{for all}\ k<0\}$ inductively.
Then the direct sum $\gla g$ becomes a graded subalgebra of $\mathfrak g(\mathfrak m)$, which is called the prolongation of
$(\mathfrak m;\mathfrak e,\mathfrak f)$.
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a pseudo-product
fundamental graded Lie algebra of the $\mu$-th kind with pseudo-product
structure $(\mathfrak e,\mathfrak f)$, where $\mu\geqq2$.
The pseudo-product fundamental graded Lie algebra
$\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ is called a free pseudo-product fundamental graded Lie algebra of type $(m,n,\mu)$
if the following conditions hold:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\roman{enumi})$}
\item $\dim\mathfrak e=m$ and $\dim\mathfrak f=n$;
\item
Let $\mathfrak m'=\bigoplus\limits_{p<0}\mathfrak g'_p$ be a pseudo-product
fundamental graded Lie algebra of the $\mu$-th kind with pseudo-product
structure $(\mathfrak e',\mathfrak f')$ and let $\varphi$ be a surjective
linear mapping of~$\mathfrak g_{-1}$ onto~$\mathfrak g'_{-1}$ such that
$\varphi(\mathfrak e)\subset\mathfrak e'$ and
$\varphi(\mathfrak f)\subset\mathfrak f'$.
Then $\varphi$ can be extended uniquely to a graded Lie algebra
epimorphism of $\mathfrak m$ onto $\mathfrak m'$.
\end{enumerate}
The main purpose of this paper is to prove the following theorem.
\begin{theoremI}\label{Yatsui-theoremII}
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a free
pseudo-product fundamental graded Lie algebra of type $(m,n,\mu)$ with
pseudo-product structure $(\mathfrak e,\mathfrak f)$ over $\mathbb C$, and let
$\gla g$
be the prolongation of $(\mathfrak m;\mathfrak e,\mathfrak f)$.
If $\mathfrak g_{1}\ne\{0\}$, then
$\gla g$
is a finite-dimensional simple graded Lie algebra of type
$(A_{m+n},\{\alpha_m,\alpha_{m+1}\})$.
\end{theoremI}
Let $\gla g$ be the prolongation of a free pseudo-product fundamental
graded Lie algebra $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ with pseudo-product
structure $(\mathfrak e,\mathfrak f)$ over $\mathbb C$.
We denote by $\operatorname{Aut}(\mathfrak g;\mathfrak e,\mathfrak f)_0$ the group of
all the automorphisms as a graded Lie algebra preserving $\mathfrak e$ and
$\mathfrak f$, which is called the automorphism group of
the pseudo-product graded Lie algebra $\gla g$.
In Section~\ref{section9}, we show that $\operatorname{Aut}(\mathfrak g;\mathfrak e,\mathfrak f)_0$
is isomorphic to
$GL(\mathfrak e)\times GL(\mathfrak f)$.
\subsection*{Notation and conventions}
\begin{enumerate}\itemsep=0pt
\item From Section~\ref{section2} to the last section, all vector spaces are considered over
the f\/ield $\mathbb C$ of complex numbers.
\item
Let $V$ be a vector space and let $W_1$ and $W_2$ be subspaces of $V$.
We denote by $W_1\wedge W_2$ the subspace of $\Lambda^2V$
spanned by all the elements of the form $w_1\wedge w_2$
$(w_1\in W_1,w_2\in W_2)$.
\item Graded vector spaces are always $\mathbb Z$-graded. If we write
$V=\bigoplus\limits_{p<0} V_p$, then
it is understood that $V_p=\{0\}$ for all $p\geqq0$.
Let $V=\bigoplus\limits_{p\in\mathbb Z}V_p$ be a graded vector space.
We denote by $V_-$ the subspace $V=\bigoplus\limits_{p<0} V_p$. Also
for $k\in \mathbb Z$ we denote by
$V_{\leqq k}$ the subspace $\bigoplus\limits_{p\leqq k} V_p$.
Let $V=\bigoplus\limits_{p\in\mathbb Z}V_p$ and
$W=\bigoplus\limits_{p\in\mathbb Z}W_p$ be graded vector spaces.
For $r\in\mathbb Z$, we set
\[
\operatorname{Hom}(V,W)_r=\{\varphi\in\operatorname{Hom}(V,W):\varphi(V_p)\subset W_{p+r}\
\text{for all} \ p\in\mathbb Z\}.
\]
\end{enumerate}
\section{Free fundamental graded Lie algebras}\label{section2}
First of all we give several def\/initions about graded Lie algebras.
Let $\mathfrak g$ be a Lie algebra. Assume that there is given a family of
subspaces $(\mathfrak g_p)_{p\in\mathbb Z}$ of $\mathfrak g$ satisfying
the following conditions:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\roman{enumi})$}
\item $\gla g$;
\item $\dim \mathfrak g_p<\infty$ for all $p\in\mathbb Z$;
\item $[\mathfrak g_p,\mathfrak g_q]\subset \mathfrak g_{p+q}$
for all $p,q\in\mathbb Z$.
\end{enumerate}
Under these conditions, we say that $\gla g$ is a graded Lie algebra (GLA).
Moreover we def\/ine the notion of homomorphism, isomorphism, monomorphism,
epimorphism, subalgebra and
ideal for GLAs in an obvious manner.
A GLA $\gla g$ is called transitive if
for $X\in\mathfrak g_p$ $(p\geqq0)$, $[X,\mathfrak g_-]=\{0\}$ implies $X=0$,
where $\mathfrak g_-$ is the negative part
$\bigoplus\limits_{p<0}\mathfrak g_p$ of $\mathfrak g$.
Furthermore a GLA $\gla g$ is called irreducible if
the $\mathfrak g_0$-module $\mathfrak g_{-1}$ is irreducible.
Let $\mu$ be a positive integer. A GLA
$\gla g$ is said to be of depth $\mu$ if $\mathfrak g_{-\mu}\ne\{0\}$ and
$\mathfrak g_p=\{0\}$ for all $p<-\mu$.
Next we def\/ine fundamental GLAs.
A GLA $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ is
called a \textit{fundamental graded Lie algebra} (FGLA)
if the following conditions hold:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\roman{enumi})$}
\item $\dim \mathfrak m<\infty$;
\item $\mathfrak g_{-1}\ne\{0\}$, and $\mathfrak m$ is generated by $\mathfrak g_{-1}$,
or more precisely $\mathfrak g_{p-1}=[\mathfrak g_p,\mathfrak g_{-1}]$
for all $p<0$.
\end{enumerate}
If an FGLA $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$
is of depth $\mu$, then $\mathfrak m$
is also said to be of the $\mu$-th kind.
Moreover an FGLA $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ is called non-degenerate if
for $X\in\mathfrak g_{-1}$, $[X,\mathfrak g_{-1}]=\{0\}$ implies $X=0$.
\par
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be an FGLA of
the $\mu$-th kind, where $\mu\geqq2$. $\mathfrak m$ is called a free
fundamental graded Lie algebra of type $(n,\mu)$
if the following conditions hold:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\roman{enumi})$}
\item $\dim \mathfrak g_{-1}=n$;
\item Let $\mathfrak m'=\bigoplus\limits_{p<0}\mathfrak g'_p$ be an FGLA
of the $\mu$-th kind and let $\varphi$ be a surjective linear
mapping of $\mathfrak g_{-1}$ onto $\mathfrak g'_{-1}$.
Then $\varphi$ can be extended uniquely to a GLA
epimorphism of $\mathfrak m$ onto $\mathfrak m'$.
\end{enumerate}
\begin{proposition}\label{prop2.1} Let $n$ and $\mu$ be positive integers such that $n,\mu\geqq2$.
\begin{enumerate}\itemsep=0pt
\item There exists a unique free FGLA of type
$(n,\mu)$ up to isomorphism.
\item Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a free
FGLA of type $(n,\mu)$.
We denote by $\operatorname{Der}(\mathfrak m)_0$ the Lie algebra of all the derivations
of $\mathfrak m$ preserving the gradation of $\mathfrak m$.
Then the mapping $\Phi:\operatorname{Der}(\mathfrak m)_0\ni D\mapsto D|\mathfrak g_{-1}\in\operatorname{\mathfrak{gl}}(\mathfrak g_{-1})$ is a Lie algebra isomorphism.
\end{enumerate}
\end{proposition}
\begin{proof} (1)
The uniqueness of a free FGLA of type $(n,\mu)$ follows from the def\/inition.
We set $X=\{1,\dots,n\}$. Let $L(X)$ be the free Lie algebra on $X$
(see \cite[Chapter~II, \S~2]{bou72:1})
and let $i:X\to L(X)$ be the canonical injection.
We def\/ine a mapping $\phi$ of $X$ into $\mathbb Z$ by $\phi(k)=-1$ $(k\in X)$.
The mapping $\phi$ def\/ines the natural gradation $(L(X)_p)_{p<0}$
on $L(X)$ such that: $(i)$ $L(X)$ is generated by $L(X)_{-1}$;
$(ii)$ $\{i(1),\dots,i(n)\}$ is a basis of $L(X)_{-1}$
(see \cite[Chapter~II, \S~2, no.~6]{bou72:1}). Note that if $n>1$, then
$L(X)_p\ne0$ for all $p<0$.
We set $\mathfrak a=\bigoplus\limits_{p<-\mu}L(X)_p$;
then $\mathfrak a$ is a graded ideal of $L(X)$ and the factor GLA $\mathfrak m=L(X)/\mathfrak a$ becomes an FGLA of the $\mu$-th kind.
We put $\mathfrak a_p=\mathfrak a\cap L(X)_p$ and
$\mathfrak g_p=L(X)_p/\mathfrak a_p$.
Now we prove that $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ is a free
FGLA of type $(n,\mu)$.
Let $\mathfrak m'=\bigoplus\limits_{p<0}\mathfrak g'_p$ be an FGLA of the
$\mu$-th kind and let
$\varphi$ be a surjective linear mapping of $\mathfrak g_{-1}$ onto
$\mathfrak g'_{-1}$.
Let $h$ be a mapping of~$X$ into $\mathfrak m'$ def\/ined by $h(k)=\varphi(i(k))$ $(k\in X)$.
Then there exists a Lie algebra homomorphism~$\tilde{h}$ of~$L(X)$
into $\mathfrak m'$ such that $\tilde{h}\circ i=h$.
Since $L(X)$ (resp.~$\mathfrak m'$) is generated by
$L(X)_{-1}$ (resp.~$\mathfrak g'_{-1}$), $\tilde{h}$ is surjective.
Since $\mathfrak m'=\bigoplus\limits_{p<0}\mathfrak g'_p$ is of the
$\mu$-th kind, $\tilde{h}(\mathfrak a)=0$, so
$\tilde{h}$ induces a GLA epimorphism~$L(\varphi)$ of
$\mathfrak m$ onto~$\mathfrak m'$ such that
$L(\varphi)|\mathfrak g_{-1}=\varphi$.
The homomorphism~$L(\varphi)$ is unique, because
$\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ is generated by
$\mathfrak g_{-1}$.
Thus $\mathfrak m$ is a free FGLA of type~$(n,\mu)$.
(2) Assume that $\mathfrak m$ is a free FGLA
constructed in~(1).
Let $\phi$ be an endomorphism of $\mathfrak g_{-1}$.
By Corollary to Proposition~8 of \cite[Chapter~II, \S~2, no.~8]{bou72:1},
$\phi$ can be extended uniquely to a~derivation $D$ of $L(X)$.
Since $D(L(X)_{-1})=\phi(L(X)_{-1})=\phi(\mathfrak g_{-1})\subset L(X)_{-1}$,
and since~$L(X)$ is generated by $L(X)_{-1}$, we see that
$D(L(X)_{p})\subset L(X)_{p}$ and $D(\mathfrak a)\subset \mathfrak a$.
Thus there is a~derivation $D_\phi$ of $\mathfrak m$ such that
$\pi\circ D=D_\phi\circ \pi$, where $\pi$ is the natural projection of $L(X)$ onto $\mathfrak m$.
The correspondence
$\operatorname{\mathfrak{gl}}(\mathfrak g_{-1})\ni\phi\mapsto D_\phi\in\operatorname{Der}(\mathfrak m)_0$
is an injective linear mapping.
Hence $\dim \operatorname{\mathfrak{gl}}(\mathfrak g_{-1})\leqq\dim \operatorname{Der}(\mathfrak m)_0$.
On the other hand, since $\mathfrak m$ is generated by $\mathfrak g_{-1}$,
the mapping $\Phi$ is a~Lie algebra monomorphism.
Therefore $\Phi$ is a Lie algebra isomorphism.
\end{proof}
\begin{remark}
Let $n$ and $\mu$ be positive integers with $n,\mu\geqq2$, and
let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a free FGLA of
type $(n,\mu)$. Furthermore
let $\mathfrak m'=\bigoplus\limits_{p<0}\mathfrak g'_p$
be an FGLA of the $\mu$-th kind,
and let $\varphi$ be a linear mapping of $\mathfrak g_{-1}$ into
$\mathfrak g'_{-1}$.
\begin{enumerate}\itemsep=0pt
\item
From the proof of Proposition \ref{prop2.1}, there exists a unique GLA
homomorphism $L(\varphi)$ of $\mathfrak m$ into $\mathfrak m'$
such that $L(\varphi)|\mathfrak g_{-1}=\varphi$.
\item
Let $\mathfrak m''=\bigoplus\limits_{p<0}\mathfrak g''_p$
be an FGLA of the $\mu$-th kind,
and let $\varphi'$ be a linear mapping of $\mathfrak g'_{-1}$ into~$\mathfrak g''_{-1}$. Assume that $\mathfrak m'=\bigoplus\limits_{p<0}\mathfrak g'_p$ is a free FGLA. By the uniqueness of $L(\varphi'\circ\varphi)$,
we see that
$L(\varphi'\circ\varphi)=L(\varphi')\circ L(\varphi)$.
\item Assume that $\mathfrak m'=\bigoplus\limits_{p<0}\mathfrak g'_p$ is a
free FGLA and $\varphi$ is injective. By the result of (2),
$L(\varphi)$ is a~monomorphism.
\item Let $W$ be an $m$-dimensional subspace of $\mathfrak g_{-1}$ with $m\geqq2$.
By the result of (3), the subalgebra of $\mathfrak m$ generated by $W$ is a
free FGLA of type $(m,\mu)$.
\end{enumerate}
\end{remark}
By Remark 2.1 (4) and \cite[Chapter~II, \S~2, Theorem~1]{bou72:1}, we get the following lemma.
\begin{lemma}\label{lem2.1} Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a
free FGLA of type $(n,\mu)$ with $\mu\geqq3$.
If $X$, $Y$ are linearly independent elements of $\mathfrak g_{-1}$, then
\begin{gather*}
\operatorname{ad}(X)^\mu(Y)=0, \qquad \operatorname{ad}(X)^{\mu-1}(Y)\ne 0, \\
\operatorname{ad}(Y)\operatorname{ad}(X)^{\mu-1}(Y)=0, \qquad \operatorname{ad}(Y)\operatorname{ad}(X)^{\mu-2}(Y)\ne 0.
\end{gather*}
\end{lemma}
\section{Universal fundamental graded Lie algebras}\label{section3}
Following N.~Tanaka \cite{tan70:1}, we introduce universal FGLAs of the
$\mu$-th kind.
Let $V$ be an $n$-dimensional vector space.
We def\/ine vector spaces $b(V)_p$ $(p<0)$ and li\-near mappings $B_p$ of
$\sum\limits_{r+s=p}b(V)_r\wedge b(V)_s$ into $b(V)_p$ $(p\leqq -2)$
as follows:
First of all, we put \mbox{$b(V)_{-1}=V$} and $b(V)_{-2}=\Lambda^2V$. Further
we def\/ine a mapping $B_{-2}:b(V)_{-1}\wedge b(V)_{-1}\to b(V)_{-2}$
to be the identity mapping. For $k\leqq-3$, we def\/ine $b(V)_k$ and
$B_k$ inductively as follows:
We set $b(V)^{(k+1)}=\bigoplus\limits_{p=-1}^{k+1}b(V)_p$ and
we def\/ine a subspace $c(V)_k$ of $\Lambda^2(b(V)^{(k+1)})$ to be
$\sum\limits_{r+s=k}b(V)_r\wedge b(V)_s$.
We denote by $A(V)_k$ the subspace of $c(V)_k$ spanned by the elements
\[
\underset{(X,Y,Z)}{\mathfrak S}\sum_{r+s=k}\sum_{u+v=r}B_r(X_u\wedge Y_v)\wedge Z_s,
\qquad X,Y,Z\in b(V)^{(k+1)} ,
\]
where $\underset{(X,Y,Z)}{\mathfrak S}$ stands for the cyclic sum with respect to $X$, $Y$, $Z$, and $X_u$ denotes the $b(V)_u$-component in the decomposition
$b(V)^{(k+1)}=\bigoplus\limits_{p=-1}^{k+1}b(V)_p$.
Now we def\/ine $b(V)_k$ to be the factor space $c(V)_k/A(V)_k$, and
$B_k$ to be the projection of $c(V)_k$ onto $b(V)_k$.
We put $b(V)=\bigoplus\limits_{p<0}b(V)_p$ and def\/ine a bracket operation
$[\ , \ ]$ on $b(V)$ by
\[
[X,Y]=\sum_{p\leqq-2}\sum_{r+s=p}B_p(X_r\wedge Y_s)
\]
for all $X,Y\in b(V)$. Then $b(V)=\bigoplus\limits_{p<0}b(V)_p$ becomes a GLA generated by $b(V)_{-1}$, and $b(V)_p\ne0$ for all $p<0$ if $\dim V>1$.
Note that $b(V)_{-3}$ is isomorphic to $\Lambda^2(V)\otimes V/\Lambda^3V$.
Let $\mu$ be a positive integer. Assume that $\mu\geqq 2$ and $\dim V=n\geqq 2$.
Since $\bigoplus\limits_{p<-\mu}b(V)_p$ is a graded ideal of $b(V)$,
we see that the factor space $b(V,\mu)=b(V)/\bigoplus\limits_{p<-\mu}b(V)_p$
becomes an FGLA of the $\mu$-th kind,
which is called a universal fundamental graded Lie algebra of the
$\mu$-th kind.
By \cite[Proposition~3.2]{tan70:1},
$b(V,\mu)$ is a free FGLA of type $(n,\mu)$.
\section{The prolongations of fundamental graded Lie algebras}\label{section4}
Following N.~Tanaka \cite{tan70:1}, we introduce the prolongations of FGLAs.
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be an FGLA.
A GLA
$\mathfrak g(\mathfrak m)
=\bigoplus\limits_{p\in\mathbb Z}\mathfrak g(\mathfrak m)_p$ is called the prolongation of $\mathfrak m$ if the following conditions hold:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\roman{enumi})$}
\item $\mathfrak g(\mathfrak m)_p=\mathfrak g_p$ for all $p<0$;
\item $\mathfrak g(\mathfrak m)$ is a transitive GLA;
\item If $\mathfrak h=\bigoplus\limits_{p\in\mathbb Z}\mathfrak h_p$ is a
GLA satisfying conditions (i) and (ii) above, then
$\mathfrak h=\bigoplus\limits_{p\in\mathbb Z}\mathfrak h_p$ can be embedded in
$\mathfrak g(\mathfrak m)$ as a GLA.
\end{enumerate}
We construct the prolongation $\mathfrak g(\mathfrak m)
=\bigoplus\limits_{p\in\mathbb Z}\mathfrak g(\mathfrak m)_p$ of $\mathfrak m$.
We set $\mathfrak g(\mathfrak m)_p=\mathfrak g_p$ $(p<0)$.
We def\/ine subspaces $\mathfrak g(\mathfrak m)_k$ $(k\geqq0)$ of
$\operatorname{Hom}(\mathfrak m,\bigoplus\limits_{p\leqq k-1}\mathfrak g(\mathfrak m)_p)_k$
and a bracket operation on $\mathfrak g(\mathfrak m)
=\bigoplus\limits_{p\in\mathbb Z}\mathfrak g(\mathfrak m)_p$ inductively.
First $\mathfrak g(\mathfrak m)_0$ is def\/ined to be $\operatorname{Der}(\mathfrak m)_0$ and a bracket operation $[\ ,\ ]:\bigoplus\limits_{p\leqq 0}\mathfrak g(\mathfrak m)_p\times \bigoplus\limits_{p\leqq 0}\mathfrak g(\mathfrak m)_p\to
\bigoplus\limits_{p\leqq 0}\mathfrak g(\mathfrak m)_p$ is def\/ined by
\begin{gather*}
[X,Y]=-[Y,X]=X(Y),\qquad X\in\mathfrak g(\mathfrak m)_0, \quad Y\in\mathfrak m,\\
[X,Y]=XY-YX, \qquad X,Y\in\mathfrak g(\mathfrak m)_0 .
\end{gather*}
Next for $k>0$ we def\/ine $\mathfrak g(\mathfrak m)_k$ $(k\geqq1)$ inductively
as follows:
\begin{gather*}
\mathfrak g(\mathfrak m)_k
=\Big\{ X\in\operatorname{Hom}\Big(\mathfrak m,\!\bigoplus_{p\leqq k-1}\mathfrak g(\mathfrak m)_p\Big)_k\!:
X([u,v])=[X(u),v]+[u,X(v)]\ \text{for all}\ u,v\in\mathfrak m\Big\},
\end{gather*}
where for $X\in\mathfrak g(\mathfrak m)_r$, $u\in\mathfrak m$,
we set $[X,u]=-[u,X]=X(u)$. Further for $X\in\mathfrak g(\mathfrak m)_k$,
$Y\in\mathfrak g(\mathfrak m)_l$ $(k,l\geqq0)$, by induction on $k+l\geqq0$,
we def\/ine $[X,Y]\in\operatorname{Hom}(\mathfrak m,\mathfrak g(\mathfrak m))_{k+l}$ by
\[
[X,Y](u)=[X,[Y,u]]-[Y,[X,u]],\qquad u\in \mathfrak m .
\]
It follows easily that $[X,Y]\in \mathfrak g(\mathfrak m)_{k+l}$.
With this bracket operation, $\mathfrak g(\mathfrak m)
=\bigoplus\limits_{p\in\mathbb Z}\mathfrak g(\mathfrak m)_p$ becomes a graded
Lie algebra satisfying conditions $(i)$, $(ii)$ and $(iii)$ above.
Let $\mathfrak m$ and $\mathfrak g(\mathfrak m)$ be as above.
Assume that we are given a subalgebra $\mathfrak g_0$ of
$\mathfrak g(\mathfrak m)_0$.
We def\/ine subspaces $\mathfrak g_k$ $(k\geqq 1)$ of
$\mathfrak g(\mathfrak m)_k$ inductively as follows:
\[
\mathfrak g_k=\{ X\in\mathfrak g(\mathfrak m)_k:
[X,\mathfrak g_p]\subset \mathfrak g_{p+k}\
\text{for all}\ p<0 \}.
\]
If we put $\gla g$, then
$\gla g$ becomes a transitive graded Lie subalgebra of $\mathfrak g(\mathfrak m)$,
which is called the prolongation of
$(\mathfrak m,\mathfrak g_0)$.
By Proposition \ref{prop2.1} (2) we get the following proposition.
\begin{proposition}\label{prop4.1} Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a
free FGLA and let $\mathfrak g(\mathfrak m)
=\bigoplus\limits_{p\in\mathbb Z}\mathfrak g(\mathfrak m)_p$
be the prolongation of $\mathfrak m$.
Then the mapping $\mathfrak g(\mathfrak m)_0\ni D\mapsto D|\mathfrak g_{-1}
\in\operatorname{\mathfrak{gl}}(\mathfrak g_{-1})$ is an isomorphism.
\end{proposition}
Conversely we obtain the following proposition.
\begin{proposition}\label{prop4.2}
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be an
FGLA of the $\mu$-th kind and let
$\mathfrak g(\mathfrak m)
=\bigoplus\limits_{p\in\mathbb Z}\mathfrak g(\mathfrak m)_p$
be the prolongation of $\mathfrak m$.
Assume that $\mathfrak g(\mathfrak m)_0$ is isomorphic to
$\operatorname{\mathfrak{gl}}(\mathfrak g_{-1})$.
If $\mu=2$ or $\mu=3$,
then $\mathfrak m$ is a~free FGLA.
\end{proposition}
\begin{proof}\looseness=-1
We put $n=\dim \mathfrak g_{-1}$. We consider a universal FGLA
$b(\mathfrak g_{-1},\mu)=\bigoplus\limits_{p<0}b(\mathfrak g_{-1},\mu)_p$ of
the $\mu$-th kind. Since $b(\mathfrak g_{-1},\mu)$
is a free FGLA of type $(n,\mu)$,
there exists a GLA epimorphism $\varphi$
of $b(\mathfrak g_{-1},\mu)$ onto $\mathfrak m$ such that the restriction
$\varphi|b(\mathfrak g_{-1},\mu)_{-1}$ is the identity mapping.
Let $\check{b}(\mathfrak g_{-1},\mu)
=\bigoplus\limits_{p\in\mathbb Z}\check{b}(\mathfrak g_{-1},\mu)_p$
be the prolongation of $b(\mathfrak g_{-1},\mu)$.
Since the mapping $\mathfrak g(\mathfrak m)_0\ni D\mapsto
D|\mathfrak g_{-1}\in\operatorname{\mathfrak{gl}}(\mathfrak g_{-1})$ is an isomorphism,
$\varphi$ can be extended to be a homomorphism $\check{\varphi}$
of $\bigoplus\limits_{p\leqq0}\check{b}(\mathfrak g_{-1},\mu)_p$ onto
$\bigoplus\limits_{p\leqq0}\mathfrak g(\mathfrak m)_p$.
Let $\mathfrak a$ be the kernel of $\check{\varphi}$;
then $\mathfrak a$ is a graded ideal of $\bigoplus\limits_{p\leqq0}\check{b}(\mathfrak g_{-1},\mu)_p$. We set $\mathfrak a_p
=\mathfrak a\cap \check{b}(\mathfrak g_{-1},\mu)_p$; then
$\mathfrak a=\bigoplus\limits_{p\leqq 0}\mathfrak a_p$.
Since the restriction of $\check{\varphi}$ to $\check{b}(\mathfrak g_{-1},\mu)_{-1}
\oplus \check{b}(\mathfrak g_{-1},\mu)_0$ is injective,
$\mathfrak a_p=\{0\}$ for $p\geqq-1$. Also
each $\mathfrak a_p$ is a $\check{b}(\mathfrak g_{-1},\mu)_0$-submodule of
$\check{b}(\mathfrak g_{-1},\mu)_p$.
From the construction of $b(\mathfrak g_{-1},\mu)$, we see that
$b(\mathfrak g_{-1},\mu)_{-2}$ (resp. $b(\mathfrak g_{-1},\mu)_{-3}$) is
isomorphic to
$\Lambda^2(\mathfrak g_{-1})$
(resp.\ $\Lambda^2(\mathfrak g_{-1})\otimes \mathfrak g_{-1}/
\Lambda^3(\mathfrak g_{-1}))$ as a $\check{b}(\mathfrak g_{-1},\mu)_0$-module.
By the table of \cite{ov90:1},
$\Lambda^2(\mathfrak g_{-1})$ and $\Lambda^2(\mathfrak g_{-1})\otimes
\mathfrak g_{-1}/\Lambda^3(\mathfrak g_{-1})$ are irreducible
$\operatorname{\mathfrak{gl}}(\mathfrak g_{-1})$-modules.
Thus we see that $\mathfrak a_{-2}=\mathfrak a_{-3}=\{0\}$.
From $\mu\leqq3$ it follows that $\varphi$ is an isomorphism.
\end{proof}
\section{Finite-dimensional simple graded Lie algebras}\label{section5}
Following \cite{yam93:1}, we f\/irst state the classif\/ication of
f\/inite-dimensional simple GLAs.
\looseness=-1
Let $\gla g$ be a f\/inite-dimensional simple GLA of the $\mu$-th kind over
$\mathbb C$ such that the negative part $\mathfrak g_-$ is an FGLA.
Let $\mathfrak h$ be a Cartan subalgebra of $\mathfrak g_0$;
then $\mathfrak h$ is a Cartan subalgebra of $\mathfrak g$ such that
$E\in \mathfrak h$, where
$E$ is the element of $\mathfrak g_0$ such that
$[E,x]=px$ for all $x\in\mathfrak g_p$ and~$p$.
Let~$\Delta$ be
a root system of $(\mathfrak g,\mathfrak h)$. For~$\alpha\in\Delta$,
we denote by $\mathfrak g^{\alpha}$ the root space corresponding to~$\alpha$. We set
$\mathfrak h_\mathbb R=\{ h\in\mathfrak h:\alpha(h)\in\mathbb R\ \text{for all}\ \alpha\in \Delta \}$
and let $(h_1,\dots,h_l)$ be a basis of $\mathfrak h_\mathbb R$ such that
$h_1=E$.
We def\/ine the set of positive roots $\Delta^+$ as the set of roots
which are positive with respect to the lexicographical ordering
in $\mathfrak h_{\mathbb R}^*$ determined by the basis $(h_1,\dots,h_l)$ of
$\mathfrak h_{\mathbb R}$.
Let $\Pi\subset \Delta^+$ be the corresponding simple root system.
We denote by $\{m_1,\dots,m_l\}$ the coordinate functions
corresponding to $\Pi$, i.e., for $\alpha\in\Delta$,
we can write $\alpha=\sum\limits_{i=1}^l m_i(\alpha)\alpha_i$.
We set $\alpha_i(E)=s_i$ and $\bm s=(s_1,\dots,s_l)$;
then each $s_i$ is a non-negative integer.
For $\alpha\in\Delta$, we call the integer
$\ell_{\bm s}(\alpha)=\sum\limits_{i=1}^l m_i(\alpha)s_i$ the
$\bm s$-length of $\alpha$.
We put $\Delta_p=\{ \alpha\in\Delta: \ell_{\bm s}(\alpha)=p \}$,
$\Pi_p=\Delta_p\cap \Pi$ and $I=\{ i\in\{1,\dots,l\}: s_i=1 \}$.
Let $\theta$ be the highest root of $\mathfrak g$; then
$\ell_{\bm s}(\theta)=\mu$. Also since the $\mathfrak g_0$-module
$\mathfrak g_{-\mu}$ is irreducible,
$\dim \mathfrak g_{-\mu}=1$
if and only if $\langle \theta,\alpha\spcheck_i\rangle=0$ for all
$i\in\{1,\dots,l\}\setminus I$, where $\{\alpha\spcheck_i\}$ is the simple root system of the dual root system $\Delta\spcheck$ of $\Delta$ corresponding
to $\{\alpha_i\}$.
In our situation, since $\mathfrak g_-$ is generated by
$\mathfrak g_{-1}$, we have $s_i=0$ or 1 for all $i$.
The $l$-tuple $\bm s=(s_1,\dots,s_l)$ of non-negative integers
is determined only by the ordering of $(\alpha_1,\dots,\alpha_l)$.
In what follows, we assume that the ordering of
$(\alpha_1,\dots,\alpha_l)$ is as in the table of~\cite{bou68:1}.
If $\mathfrak g$ has the Dynkin diagram of type $X_l$ $(X=A,\dots,G)$,
then the simple GLA $\gla g$
is said to be of type $(X_l,\Pi_1)$.
Here we remark that for an automorphism $\bar{\mu}$ of the
Dynkin diagram, a~simple GLA of type $(X_l,\Pi_1)$
is isomorphic to that of type $(X_l,\bar{\mu}(\Pi_1))$.
We will identify a~simple GLA of type $(X_l,\Pi_1)$
with that of type $(X_l,\bar{\mu}(\Pi_1))$.
For $i\in I$, we put $\Delta_p^{(i)}=\{\alpha\in\Delta:
m_i(\alpha)=p\ \text{and}\ m_j(\alpha)=
0\ \text{for}\ j\in I\setminus\{i\} \}$ and
$\mathfrak g_p^{(i)}=\sum\limits_{\alpha\in\Delta_p^{(i)}}\mathfrak g^\alpha$;
then
$\mathfrak g_{-1}^{(i)}$ is an irreducible
$\mathfrak g_0$-submodule of $\mathfrak g_{-1}$ with
highest weight $-\alpha_i$.
In particular, if the $\mathfrak g_0$-module $\mathfrak g_{-1}$
is irreducible, then $\#(I)=1$.
{\samepage For $i\in I$,
we denote by $\mathfrak g^{(i)}$ the subalgebra of $\mathfrak g$ generated by
$\mathfrak g_{-1}^{(i)}\oplus \mathfrak g_{1}^{(i)}$;
then $\mathfrak g^{(i)}$ is a~simple GLA whose
Dynkin diagram is the connected component containing the vertex $i$ of
the sub\-diagram of~$X_l$
corresponding to vertices $(\{1,\dots,l\}\setminus I)\cup\{i\}$.
We denote by $\theta^{(i)}$ the highest root of $\mathfrak g^{(i)}$.
Then $[\mathfrak g_{-1}^{(i)},\mathfrak g_{-1}^{(i)}]=\{0\}$ if and only if
$m_i(\theta^{(i)})=1$.
}
From Theorem 5.2 of \cite{yam93:1}, we obtain the following theorem:
\begin{theorem}\label{thm5.1}
Let $\gla g$ be a finite-dimensional simple GLA over $\mathbb C$ such that
$\mathfrak g_-$ is an FGLA and
the $\mathfrak g_0$-module $\mathfrak g_{-1}$
is irreducible.
Then $\gla g$ is the prolongation of $\mathfrak g_-$ except for
the following cases:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\alph{enumi})$}
\item $\mathfrak g_-$ is of the first kind;
\item $\mathfrak g_-$ is of the second kind and $\dim\mathfrak g_{-2}=1$.
\end{enumerate}
\end{theorem}
Let $\gla g$ be a f\/inite-dimensional simple GLA.
Now we assume that $\mathfrak g_0$ is isomorphic to $\operatorname{\mathfrak{gl}}(\mathfrak g_{-1})$;
then the $\mathfrak g_0$-module $\mathfrak g_{-1}$ is irreducible.
The derived subalgebra $[\mathfrak g_0,\mathfrak g_0]$ of $\mathfrak g_0$
is a~semisimple Lie algebra whose Dynkin diagram is the subdiagram of
$X_l$ consisting
of the vertices $\{1,\dots,l\}\setminus I$.
Since $[\mathfrak g_0,\mathfrak g_0]$ is of type $A_{l-1}$ and since
the $\mathfrak g_0$-module $\mathfrak g_{-1}$ is elementary,
$(X_l,\Pi_1)$ is one of the following cases:
\[
(A_l,\{\alpha_1\}),\qquad (B_l,\{\alpha_l\}), \quad l\geqq2,
\qquad (G_2,\{\alpha_1\}).
\]
From this result and Propositions \ref{prop4.1} and \ref{prop4.2}, we get the following theorem:
\begin{theorem}\label{thm5.2}
Let $\gla g$ be a finite-dimensional simple GLA of type
$(X_l,\Pi_1)$ over $\mathbb C$ satisfying the following conditions:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\roman{enumi})$}
\item $\mathfrak g_-$ is an FGLA of the $\mu$-th kind;
\item The $\mathfrak g_0$-module $\mathfrak g_{-1}$ is irreducible;
\item $\mathfrak g_0$ is isomorphic to $\operatorname{\mathfrak{gl}}(\mathfrak g_{-1})$;
\item $\mathfrak g$ is the prolongation of $\mathfrak g_{-}$.
\end{enumerate}
Then $\mathfrak g_-$ is a free FGLA of type
$(l,\mu)$, and
$\gla g$ is one of the following types:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\alph{enumi})$}
\item $l\geqq3$, $\mu=2$, $(X_l,\Pi_1)=(B_l,\{\alpha_l\})$.
\item $l=2$, $\mu=3$, $(X_l,\Pi_1)=(G_2,\{\alpha_1\})$.
\end{enumerate}
\end{theorem}
\section[Graded Lie algebras $W(n)$, $K(n)$ of Cartan type]{Graded Lie algebras $\boldsymbol{W(n)}$, $\boldsymbol{K(n)}$ of Cartan type}\label{section6}
In this section, following V.G.~Kac \cite{kac68:1},
we describe Lie algebras $W(n)$, $K(n)$ of Cartan type and their standard gradations.
Let $A(m)$ denote the monoid (under addition) of all $m$-tuples of
non-negative integers.
For an $m$-tuple
$\bm s=(s_1,\dots,s_m)$ of positive integers and
$\alpha=(\alpha_1,\dots,\alpha_m)\in A(m)$
we set $\|\alpha\|_{\bm s}=\sum\limits_{i=1}^ms_i\alpha_i$.
Also we denote the $m$-tuple $(1,\dots,1)$ by ${\bm 1}_m$
and we denote the
$(m+1)$-tuple $(1,\dots,1,2)$ by $({\bm 1}_m,2)$.
Let $\mathfrak A(m)=\mathbb C[x_1,\dots,x_m]$.
For any $m$-tuple $\bm s$
of positive integers, we denote by $\mathfrak A(m;\bm s)_p$
the subspace of $\mathfrak A(m)$ spanned by polynomials
\[
x^{\alpha}=x_1^{\alpha_1}\cdots x_m^{\alpha_m}, \qquad
\alpha=(\alpha_1,\dots,\alpha_m)\in A(m),\quad \|\alpha\|_{\bm s}=p .
\]
Let $W(m)$ be the Lie algebra consisting of all the polynomial vector f\/ields
\begin{gather}\label{eq6.1}
\sum\limits_{i=1}^m P_i\frac{\partial}{\partial x_i},
\qquad P_i\in\mathfrak A(m) .
\end{gather}
For an $m$-tuple $\bm s=(s_1,\dots,s_m)$ of positive integers,
we denote by $W(m;\bm s)_p$
the subspace of $W(m)$ consisting of those polynomial vector f\/ields \eqref{eq6.1}
such that the polynomials $P_i$ are contained in
$\mathfrak A(m;\bm s)_{p+s_i}$;
then
$W(m;\bm s)=\bigoplus\limits_{p\in\mathbb Z}W(m;\bm s)_p$
is a transitive GLA.
In particular, $W(m;{\bm 1}_m)=\bigoplus\limits_{p\geqq-1}W(m;{\bm 1}_m)_p$ is
a transitive irreducible GLA
such that: $(i)$ $W(m;{\bm 1}_m)_0$ is isomorphic to $\operatorname{\mathfrak{gl}}(m,\mathbb C)$;
$(ii)$ the $W(m;{\bm 1}_m)_0$-module $W(m;{\bm 1}_m)_{-1}$ is elementary; $(iii)$
$W(m;{\bm 1}_m)$ is the prolongation of $W(m;{\bm 1}_m)_-$.
We now consider the following dif\/ferential form
\[
\omega_K=dx_{2n+1}-\sum_{i=1}^nx_{i+n}dx_{i}.
\]
Def\/ine
\[
K(n)=\{ D\in W(2n+1):D\omega_K\in\mathfrak A(2n+1)\omega_K \}.
\]
(Here the action of $D$ on the dif\/ferential forms is extended from
its action on $\mathfrak A(2n+1)$ by requiring that $D$ be a derivation of
the exterior algebra satisfying $D(df)=d(Df)$, where
$df=\sum\frac{\partial f}{\partial x_i}dx_i$, $f\in\mathfrak A(2n+1)$.)
We set $K(n)_p=W(2n+1;({\bm 1}_{2n},2))_p\cap K(n)$.
Then $K(n)=\bigoplus\limits_{p\geqq-2}K(n)_p$
is a transitive irreducible GLA
such that: $(i)$
$K(n)_0$ is isomorphic to $\operatorname{\mathfrak{csp}}(n,\mathbb C)$;
$(ii)$ the $K(n)_0$-module $K(n)_{-1}$ is elementary;
$(iii)$ $K(n)$ is the prolongation of $K(n)_-$
(cf.~\cite{kac68:1, mor88:1}).
From Proposition 2.2 of \cite{mt70:1}, we get
\begin{theorem}\label{thm6.1} Let $\gla g$ be a transitive GLA over $\mathbb C$
satisfying the following conditions:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\roman{enumi})$}
\item $\mathfrak g_-$ is an FGLA of the
$\mu$-th kind;
\item $\mathfrak g$ is infinite-dimensional;
\item The $\mathfrak g_0$-module $\mathfrak g_{-1}$ is irreducible;
\item $\mathfrak g$ is the prolongation of $\mathfrak g_-$.
\end{enumerate}
Then $\mu\leqq2$ and $\gla g$ is isomorphic to $W(m;{\bm 1}_m)$ or $K(n)$.
\end{theorem}
\section{Classif\/ication of the prolongations\\ of free
fundamental graded Lie algebras}\label{section7}
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a free FGLA of type
$(n,\mu)$ over $\mathbb C$, and let $\mathfrak g(\mathfrak m)
=\bigoplus\limits_{p\in\mathbb Z}\mathfrak g(\mathfrak m)_p$ be the
prolongation of~$\mathfrak m$.
First of all, we assume that $\dim \mathfrak g(\mathfrak m)=\infty$.
By Theorem~\ref{thm6.1},
$\mathfrak g(\mathfrak m)$ is isomorphic to $K(m)$
as a GLA, where $n=2m$.
Since $K(m)_0$ is isomorphic to $\operatorname{\mathfrak{csp}}(m,\mathbb C)$ and since
$\mathfrak g(\mathfrak m)_0$ is isomorphic to $\operatorname{\mathfrak{gl}}(n,\mathbb C)$,
we see that $m=1$.
Therefore $\mathfrak g(\mathfrak m)$ is isomorphic to
$K(1)$ as a GLA.
Next we assume that $\dim \mathfrak g(\mathfrak m)<\infty$
and $\mathfrak g(\mathfrak m)_1\ne0$.
Since the $\mathfrak g(\mathfrak m)_0$-module $\mathfrak g(\mathfrak m)_{-1}$
is irreducible,
$\mathfrak g(\mathfrak m)$ is a f\/inite-dimensional simple GLA (see \cite{kn64:1,och70:1}).
By Theorem~\ref{thm5.2}, $\mathfrak g(\mathfrak m)$ is isomorphic to one of the following types:
\[
(B_l,\{\alpha_l\}),\quad l\geqq3,\qquad (G_2,\{\alpha_1\}).
\]
Thus we get a proof of the following theorem:
\begin{theorem}\label{thm7.1}
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a free FGLA of type
$(n,\mu)$ over $\mathbb C$, and let $\gla{g (\mathfrak m)}$ be
the prolongation of $\mathfrak m$.
Then one of the following cases occurs:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\alph{enumi})$}
\item $(n,\mu)\ne(n,2)$ $(n\geqq2)$, $(2,3)$. In this case,
$\mathfrak g(\mathfrak m)_1=\{0\}$.
\item
$(n,\mu)=
(n,2)$ $(n\geqq3)$, $(2,3)$.
In this case, $\dim\mathfrak g(\mathfrak m)<\infty$ and
$\mathfrak g(\mathfrak m)_1\ne\{0\}$. Furthermore
$\mathfrak g(\mathfrak m)$ is isomorphic to a finite-dimensional simple GLA of
type $(B_n,\{\alpha_n\})$ $(n\geqq3)$ or $(G_2,\{\alpha_1\})$ $(n=2)$.
\item $(n,\mu)=(2,2)$.
In this case,
$\dim\mathfrak g(\mathfrak m)=\infty$. Furthermore,
$\mathfrak g(\mathfrak m)$ is isomorphic to $K(1)$ as a GLA.
\end{enumerate}
\end{theorem}
\section{Free pseudo-product fundamental graded Lie algebras}\label{section8}
An FGLA $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ equipped with
nonzero subspaces $\mathfrak e$, $\mathfrak f$ of $\mathfrak g_{-1}$ is called
a pseudo-product FGLA if the following conditions hold:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\roman{enumi})$}
\item $\mathfrak g_{-1}=\mathfrak e\oplus\mathfrak f$;
\item $[\mathfrak e,\mathfrak e]=[\mathfrak f,\mathfrak f]=\{0\}$.
\end{enumerate}
The pair $(\mathfrak e,\mathfrak f)$ is called the pseudo-product structure of
the pseudo-product FGLA
$\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$.
We will also denote by the triplet $(\mathfrak m;\mathfrak e,\mathfrak f)$
the pseudo-product FGLA $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$
with pseudo-product structure $(\mathfrak e,\mathfrak f)$.
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ (resp.
$\mathfrak m'=\bigoplus\limits_{p<0}\mathfrak g'_p$) be a
pseudo-product FGLA with pseudo-product structure $(\mathfrak e,\mathfrak f)$
(resp. $(\mathfrak e',\mathfrak f')$).
We say that two pseudo-product FGLAs $(\mathfrak m;\mathfrak e,\mathfrak f)$
and $(\mathfrak m';\mathfrak e',\mathfrak f')$
are isomorphic if there exists a GLA isomorphism
$\varphi$ of $\mathfrak m$ onto $\mathfrak m'$ such that
$\varphi(\mathfrak e)=\mathfrak e'$ and $\varphi(\mathfrak f)=\mathfrak f'$.
\begin{proposition}\label{prop8.1} Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a
pseudo-product FGLA of the $\mu$-th kind with pseudo-product
structure $(\mathfrak e,\mathfrak f)$.
If $\mathfrak m$ is a free FGLA of type $(n,\mu)$,
then $n=2$.
\end{proposition}
\begin{proof} Let $(e_1,\dots,e_m)$ (resp.\ $(f_1,\dots,f_l)$) be a basis of
$\mathfrak e$ (resp.~$\mathfrak f$).
Since $[\mathfrak e,\mathfrak f]=\mathfrak g_{-2}$,
the space~$\mathfrak g_{-2}$ is generated by
$\{[e_i,f_j]:i=1,\dots,m,j=1,\dots,l\}$ as a vector space, so
$\dim \mathfrak g_{-2}\leqq ml$. On the other hand,
since $\mathfrak m$ is a free FGLA,
\[
\dim \mathfrak g_{-2}=\dim b(\mathfrak g_{-1},\mu)_{-2}=
\dim \Lambda^2(\mathfrak g_{-1})=\frac{(m+l)(m+l-1)}{2},
\]
so $ml\geqq \frac{(m+l)(m+l-1)}{2}$.
From this fact it follows that $m=l=1$.
\end{proof}
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a pseudo-product FGLA
of the $\mu$-th kind with
pseudo-product structure $(\mathfrak e,\mathfrak f)$, where $\mu\geqq2$.
$\mathfrak m$ is called a free pseudo-product FGLA
of type $(m,n,\mu)$ if the following conditions hold:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\roman{enumi})$}
\item $\dim\mathfrak e=m$ and $\dim\mathfrak f=n$;
\item Let $\mathfrak m'=\bigoplus\limits_{p<0}\mathfrak g'_p$ be a pseudo-product FGLA of the $\mu$-th kind with
pseudo-product structure $(\mathfrak e',\mathfrak f')$
and let $\varphi$ be a surjective linear mapping of $\mathfrak g_{-1}$ onto
$\mathfrak g'_{-1}$ such that
$\varphi(\mathfrak e)\subset\mathfrak e'$ and
$\varphi(\mathfrak f)\subset\mathfrak f'$.
Then $\varphi$ can be extended uniquely to a GLA
epimorphism of $\mathfrak m$ onto $\mathfrak m'$.
\end{enumerate}
\begin{proposition}\label{prop8.2} Let $m$, $n$ and $\mu$ be positive integers such that $\mu\geqq2$.
\begin{enumerate}\itemsep=0pt
\item There exists a unique free pseudo-product FGLA
of type
$(m,n,\mu)$ up to isomorphism.
\item Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a free
pseudo-product FGLA of type $(m,n,\mu)$
with pseudo-product structure $(\mathfrak e,\mathfrak f)$.
We denote by $\operatorname{Der}(\mathfrak m;\mathfrak e,\mathfrak f)_0$ the Lie algebra of
all the derivations of $\mathfrak m$ preserving the gradation of $\mathfrak m$, $\mathfrak e$ and $\mathfrak f$.
Then the mapping $\Phi:\operatorname{Der}(\mathfrak m;\mathfrak e,\mathfrak f)_0\ni D\mapsto (D|\mathfrak e,D|\mathfrak f)\in\operatorname{\mathfrak{gl}}(\mathfrak e)\times\operatorname{\mathfrak{gl}}(\mathfrak f)$ is a Lie algebra isomorphism.
\end{enumerate}
\end{proposition}
\begin{proof} (1)
The uniqueness of a free pseudo-product FGLA of type $(m,n,\mu)$ follows from the def\/inition.
Let $V$ be an $(m+n)$-dimensional vector space and let
$\mathfrak e$, $\mathfrak f$ be subspaces of $V$
such that $V=\mathfrak e\oplus \mathfrak f$,
$\dim \mathfrak e=m$ and $\dim \mathfrak f=n$.
Let $\mathfrak a=\bigoplus\limits_{p<0}\mathfrak a_p$ be the graded ideal
of $b(V,\mu)$ generated by
$[\mathfrak e,\mathfrak e]+[\mathfrak f,\mathfrak f]$.
We set $\mathfrak m=b(V,\mu)/\mathfrak a$,
$\mathfrak g_p=b(V,\mu)_p/\mathfrak a_p$.
Clearly $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ is
a~pseudo-product FGLA.
We show that the factor algebra $\mathfrak m$
is a free pseudo-product FGLA of type $(m,n,\mu)$.
First we prove that $\mathfrak m$ is of the $\mu$-th kind.
Let $\mathfrak n=\bigoplus\limits_{p<0}\mathfrak g''_p$ be
a free FGLA of type $(2,\mu)$ and let~$\mathfrak e''$ and~$\mathfrak f''$
be one-dimensional subspaces of $\mathfrak g''_{-1}$ such that
$\mathfrak g''_{-1}=\mathfrak e''\oplus\mathfrak f''$.
Let~$\varphi_1$ be an injective linear mapping of $\mathfrak g''_{-1}$
into $V$ such that
$\varphi_1(\mathfrak e'')\subset \mathfrak e$ and
$\varphi_1(\mathfrak f'')\subset \mathfrak f$.
Let~$\varphi_2$ be a linear mapping of~$V$
into $\mathfrak g''_{-1}$ such that
$\varphi_2\circ\varphi_1=1_{\mathfrak g''_{-1}}$,
$\varphi_2(\mathfrak e)=\mathfrak e''$ and
$\varphi_2(\mathfrak f)=\mathfrak f''$.
There exists
a~homomorphism~$L(\varphi_1)$ (resp.~$L(\varphi_2)$) of
$\mathfrak n$ (resp.~$b(V,\mu)$) into
$b(V,\mu)$ (resp.~$\mathfrak n$)
such that
$L(\varphi_1)|\mathfrak g''_{-1}=\varphi_1$
(resp.\ $L(\varphi_2)|V=\varphi_2$).
Since $L(\varphi_2)([\mathfrak e,\mathfrak e]+[\mathfrak f,\mathfrak f])=\{0\}$,
$L(\varphi_2)$ induces a~homomorphism $\hat{L}(\varphi_2)$ of
$\mathfrak m$ into $\mathfrak n$ such that
$L(\varphi_2)=\hat{L}(\varphi_2)\circ \pi$,
where $\pi$ is the natural projection of $b(V,\mu)$ onto
$\mathfrak m$. Since
\[
1_{\mathfrak n}=L(\varphi_2)\circ L(\varphi_1)
=\hat{L}(\varphi_2)\circ \pi\circ L(\varphi_1),
\]
$\pi\circ L(\varphi_1)$ is a monomorphism of $\mathfrak n$ into $\mathfrak m$, so $\mathfrak g_{-\mu}\ne\{0\}$.
Thus $\mathfrak m$ is of the $\mu$-th kind.
Let $\mathfrak m'=\bigoplus\limits_{p<0}\mathfrak g'_p$ be a pseudo-product
FGLA of the $\mu$-th kind
with pseudo-product structure $(\mathfrak e',\mathfrak f')$
and let $\phi$ be a surjective linear mapping of $b(V,\mu)_{-1}$ onto
$\mathfrak g'_{-1}$ such that
$\phi(\mathfrak e)\subset\mathfrak e'$ and
$\phi(\mathfrak f)\subset\mathfrak f'$.
By the def\/inition of a free FGLA,
there exists a GLA epimorphism $L(\phi)$ of
$b(V,\mu)$ onto $\mathfrak m'$ such that
$L(\phi)|b(V,\mu)_{-1}=\phi$.
Since $L(\phi)([\mathfrak e,\mathfrak e]+[\mathfrak f,\mathfrak f])\subset [\mathfrak e',\mathfrak e']+[\mathfrak f',\mathfrak f']=\{0\}$,
we see that $L(\phi)(\mathfrak a)=\{0\}$, so
the epimorphism $L(\phi)$ induces a GLA epimorphism
$\hat{L}(\phi)$ of $\mathfrak m$ onto $\mathfrak m'$ such that
$\hat{L}(\phi)|\mathfrak g_{-1}=\phi$.
(2)~It suf\/f\/ices to prove that the mapping $\Phi$ is surjective.
Let $\phi$ be an endomorphism of~$\mathfrak g_{-1}$ such that
$\phi(\mathfrak e)\subset \mathfrak e$ and
$\phi(\mathfrak f)\subset \mathfrak f$.
By Proposition~\ref{prop2.1}~(2), there exists a $D\in\operatorname{Der}(b(V,\mu))_0$
such that $D|b(V,\mu)_{-1}=\phi$.
Since $D([\mathfrak e,\mathfrak e]+[\mathfrak f,\mathfrak f])
\subset [\mathfrak e,\mathfrak e]+[\mathfrak f,\mathfrak f]$,
$D$ induces a derivation~$\hat{D}$ of~$\mathfrak m$ such that
$\hat{D}|\mathfrak g_{-1}=\phi$.
\end{proof}
\begin{remark}\label{rem8.1}
Let $m$, $n$, $m'$, $n'$ and $\mu$ be positive integers with $\mu\geqq2$, and let
$\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$
(resp.\ $\mathfrak m'=\bigoplus\limits_{p<0}\mathfrak g'_p$) be a
free pseudo-product FGLA of type $(m,n,\mu)$ $($resp.~$(m',n',\mu))$
with pseudo-product structure $(\mathfrak e,\mathfrak f)$
(resp. $(\mathfrak e',\mathfrak f'))$.
Furthermore
let $\varphi$ be a linear mapping of $\mathfrak g_{-1}$ into
$\mathfrak g'_{-1}$ such that $\varphi(\mathfrak e)\subset \mathfrak e'$ and
$\varphi(\mathfrak f)\subset \mathfrak f'$.
\begin{enumerate}\itemsep=0pt
\item
From the proof of Proposition~\ref{prop8.2}, there exists a unique GLA
homomorphism $\hat{L}(\varphi)$ of $\mathfrak m$ into $\mathfrak m'$
such that $\hat{L}(\varphi)|\mathfrak g_{-1}=\varphi$.
If $\varphi$ is injective, then $\hat{L}(\varphi)$ is a monomorphism.
\item
Assume that $m=n=1$ and $\varphi$ is injective.
Then $\hat{L}(\varphi)(\mathfrak m)$ is a graded subalgebra of $\mathfrak m'$
isomorphic to a free FGLA of type $(2,\mu)$.
From this result,
the subalgebra of $\mathfrak m'$ generated by a nonzero element $X$ of
$\mathfrak e'$ and a nonzero element $Y$ of $\mathfrak f'$
is a free FGLA of type $(2,\mu)$.
\end{enumerate}
\end{remark}
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a pseudo-product FGLA
of the $\mu$-th kind with pseudo-product structure~$(\mathfrak e,\mathfrak f)$.
We denote by $\mathfrak g_0$ the Lie algebra of
all the derivations of $\mathfrak m$ preserving the gradation of~$\mathfrak m$,~$\mathfrak e$ and~$\mathfrak f$:
\[
\mathfrak g_0=\{ D\in\operatorname{Der}(\mathfrak g)_0:D(\mathfrak e)\subset \mathfrak e,
D(\mathfrak f)\subset \mathfrak f \}.
\]
The prolongation
$\gla g$ of $(\mathfrak m,\mathfrak g_0)$ is called the prolongation of
$(\mathfrak m;\mathfrak e,\mathfrak f)$.
A transitive GLA $\gla g$ is called a pseudo-product GLA
if there are given nonzero subspaces $\mathfrak e$ and $\mathfrak f$ of $\mathfrak g_{-1}$ satisfying the following conditions:
\begin{enumerate}\itemsep=0pt
\renewcommand{\labelenumi}{$(\roman{enumi})$}
\item The negative part $\mathfrak g_{-}$ is
a pseudo-product
FGLA with pseudo-product structure
$(\mathfrak e,\mathfrak f)$;
\item $[\mathfrak g_0,\mathfrak e]\subset \mathfrak e$
and $[\mathfrak g_0,\mathfrak f]\subset \mathfrak f$.
\end{enumerate}
The pair $(\mathfrak e,\mathfrak f)$ is called the pseudo-product structure of
the pseudo-product GLA
$\gla g$.
If the $\mathfrak g_0$-modules $\mathfrak e$ and $\mathfrak f$ are
irreducible,
then the pseudo-product GLA $\gla g$ is said to be
of irreducible type.
The following lemma is due to N.~Tanaka (cf.~\cite{tan89:01}).
Here we give a proof for the convenience of the readers.
\begin{lemma}\label{lemma8.1} Let $\gla g$ be a pseudo-product GLA of
depth $\mu$ with pseudo-product structure~$(\mathfrak e,\mathfrak f)$.
\begin{enumerate}\itemsep=0pt
\item If $\mathfrak g_-$ is non-degenerate,
then $\mathfrak g$ is finite-dimensional.
\item If $\gla g$ is of irreducible type and $\mu\geqq2$, then
$\mathfrak g$ is finite-dimensional.
\end{enumerate}
\end{lemma}
\begin{proof} (1) The proof essentially follows that of
\cite[Corollary 3 to Theorem 11.1]{tan70:1}.
For $p\in \mathbb Z$, we set
$\mathfrak h_p=\{X\in\mathfrak g_p:[X,\mathfrak g_{\leqq -2}]=\{0\}\}$.
We def\/ine $I\in\mathfrak{gl}(\mathfrak g_{-1})$ as follows:
$I(x)=-\sqrt{-1}x$ for $x\in\mathfrak e$,
$I(x)=\sqrt{-1}x$ for $x\in\mathfrak f$.
Then $I^2=-1$,
$I([a,x])=[a,I(x)]$ and $[I(x),I(y)]=[x,y]$ for $a\in\mathfrak g_0$ and
$x,y\in\mathfrak g_{-1}$. We put $\langle x,y\rangle=[I(x),y]$ for
$x,y\in\mathfrak g_{-1}$. Then $\langle x,y\rangle=\langle y,x\rangle$,
and for $x\in\mathfrak g_{-1}$, $\langle x,\mathfrak g_{-1}\rangle=\{0\}$
implies $x=0$, since $\mathfrak g_-$ is non-degenerate. Also
$\langle[a,x],y\rangle+\langle x,[a,y]\rangle=0$ and $[[b,x],y]=[[b,y],x]$
for $a\in\mathfrak h_0$, $b\in\mathfrak h_1$ and $x,y\in\mathfrak g_{-1}$.
Then, for $b\in\mathfrak h_1$,
$x,y,z\in\mathfrak g_{-1}$, we have $\langle[[b,x],y],z\rangle=
-\langle y,[[b,x],z]\rangle=-\langle y,[[b,z],x]\rangle=
\langle [[b,z],y],x\rangle=\langle[[b,y],z],x\rangle
=-\langle z,[[b,y],x]\rangle=-\langle[[b,x],y],z\rangle$, so
$\langle[[b,x],y],z\rangle=0$. By transitivity of $\mathfrak g$,
$\mathfrak h_1=\{0\}$. Therefore by \cite[Corollary 1 to Theorem 11.1]{tan70:1}, $\mathfrak g$ is f\/inite-dimensional.
(2) We may assume that $\mathfrak g_1\ne\{0\}$.
By \cite[Lemma 2.4]{yat88:0}, the
$\mathfrak g_0$-modules $\mathfrak e$, $\mathfrak f$ are not isomorphic to each
other. We put
$\mathfrak d=\{X\in\mathfrak g_{-1}:[X,\mathfrak g_{-1}]=\{0\}\}$;
then $\mathfrak d$ is a $\mathfrak g_0$-submodule of $\mathfrak g_{-1}$.
Hence $\mathfrak d=\{0\}$, $\mathfrak d=\mathfrak e$, $\mathfrak d=\mathfrak f$ or
$\mathfrak d=\mathfrak g_{-1}$. If $\mathfrak d\ne\{0\}$, then $\mathfrak g_{-2}=[\mathfrak e,\mathfrak f]=\{0\}$, which is a contradiction. Thus $\mathfrak g_-$ is non-degenerate.
By (1), $\mathfrak g$ is f\/inite-dimensional.
\end{proof}
The prolongation of a pseudo-product FGLA becomes
a pseudo-product GLA.
By Proposition~\ref{prop8.2}~(2), the prolongation of a free pseudo-product
FGLA is a pseudo-product GLA of irreducible type.
By Lemma \ref{lemma8.1}~(2), the prolongation of a free pseudo-product FGLA is
f\/inite-dimensional.
\begin{proposition}\label{prop8.3} Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a
free pseudo-product FGLA of type $(m,n,\mu)$
with pseudo-product structure $(\mathfrak e,\mathfrak f)$
and let
$\gla g$ be the prolongation of $(\mathfrak m;\mathfrak e,\mathfrak f)$.
\begin{enumerate}\itemsep=0pt
\item
$\mathfrak g_0$ is isomorphic to $\operatorname{\mathfrak{gl}}(\mathfrak e)\oplus\operatorname{\mathfrak{gl}}(\mathfrak f)$ as
a Lie algebra.
\item $\mathfrak g_{-2}$ is isomorphic to $\mathfrak e\otimes \mathfrak f$
as a $\mathfrak g_0$-module. In particular,
$\dim\mathfrak g_{-2}=mn$.
\item $\mathfrak g_{-3}$ is isomorphic to
$S^2(\mathfrak e)\otimes
\mathfrak f\oplus S^2(\mathfrak f)\otimes \mathfrak e$
as a $\mathfrak g_0$-module.
In particular,
$\dim\mathfrak g_{-3}=\frac{mn(m+n+2)}2$.
\end{enumerate}
\end{proposition}
\begin{proof} (1) This follows from Proposition \ref{prop8.2} (2).
\par
(2) Let $\mathfrak a=\bigoplus\limits_{p<0}\mathfrak a_p$ be the graded ideal
of $b(\mathfrak g_{-1},\mu)$ generated by
$[\mathfrak e,\mathfrak e]+[\mathfrak f,\mathfrak f]$.
By the construction of $b(\mathfrak g_{-1},\mu)_{-2}$,
$\mathfrak a_{-2}$ is isomorphic to $\Lambda^2(\mathfrak e)\oplus
\Lambda^2(\mathfrak f)$, so
$\mathfrak g_{-2}=b(\mathfrak g_{-1},\mu)_{-2}/\mathfrak a_{-2}$
is isomorphic to $\mathfrak e\otimes \mathfrak f$.
(3) By the construction of $b(\mathfrak g_{-1},\mu)_{-3}$,
$b(\mathfrak g_{-1},\mu)_{-3}$ is isomorphic to
\[
(\mathfrak e\oplus \mathfrak f)\otimes \Lambda^2(\mathfrak e\oplus \mathfrak f)/\Lambda^3(\mathfrak e\oplus \mathfrak f)\cong
(\mathfrak e\otimes \mathfrak e\otimes \mathfrak f)
\oplus
(\mathfrak e\otimes \mathfrak f\otimes \mathfrak f).
\]
Moreover,
$\mathfrak a_{-3}$ is isomorphic to
\[
(\mathfrak e\oplus \mathfrak f)\otimes \Lambda^2(\mathfrak e)
\oplus(\mathfrak e\oplus \mathfrak f)\otimes \Lambda^2(\mathfrak f)/
\Lambda^3(\mathfrak e\oplus \mathfrak f)
\cong \mathfrak e\otimes\Lambda^2(\mathfrak e)\oplus
\mathfrak f\otimes\Lambda^2(\mathfrak f).
\]
Hence
$\mathfrak g_{-3}=b(\mathfrak g_{-1},\mu)_{-3}/\mathfrak a_{-3}$
is isomorphic to
\[
(\mathfrak e\otimes \mathfrak e\otimes \mathfrak f)/
\Lambda^2(\mathfrak e)\otimes\mathfrak f\oplus
(\mathfrak e\otimes \mathfrak f\otimes \mathfrak f)/
\mathfrak e\otimes\Lambda^2(\mathfrak f)
\cong
S^2(\mathfrak e)\otimes
\mathfrak f\oplus S^2(\mathfrak f)\otimes \mathfrak e.
\]
This completes the proof.
\end{proof}
\begin{proposition}\label{prop8.4}
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a
pseudo-product FGLA of the $\mu$-th kind with
pseudo-product structure $(\mathfrak e,\mathfrak f)$, where $\mu\geqq2$.
We denote by $\mathfrak c$
the centralizer of $\mathfrak g_{-2}$ in $\mathfrak g_{-1}$.
Let $\gla g$ be the prolongation of $(\mathfrak m;\mathfrak e,\mathfrak f)$.
Assume that $\mathfrak g_0$ is isomorphic to $\operatorname{\mathfrak{gl}}(\mathfrak e)\oplus\operatorname{\mathfrak{gl}}(\mathfrak f)$ as a Lie algebra.
\begin{enumerate}\itemsep=0pt
\item If $\mu=2$, then $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$
is a free
pseudo-product FGLA.
\item If $\mu\geqq3$ and $\mathfrak c\ne\{0\}$,
then $(\mathfrak m;\mathfrak e,\mathfrak f)$ is not a free pseudo-product
FGLA.
\item If $\mu=3$ and $\mathfrak c=\{0\}$,
then $(\mathfrak m;\mathfrak e,\mathfrak f)$ is a free pseudo-product
FGLA.
\end{enumerate}
\end{proposition}
\begin{proof}
Let
$\check{\mathfrak m}=\bigoplus\limits_{p<0}\check{\mathfrak g}_p$
be the free pseudo-product FGLA of type $(m,n,\mu)$
with pseudo-product structure
$(\check{\mathfrak e},\check{\mathfrak f})$ such that
$\check{\mathfrak g}_{-1}=\mathfrak g_{-1}$,
$\check{\mathfrak e}=\mathfrak e$ and $\check{\mathfrak f}=\mathfrak f$.
Let $\check{\mathfrak g}=\bigoplus\limits_{p\in\mathbb Z}\check{\mathfrak g}_p$ be the prolongation of $(\check{\mathfrak m};\check{\mathfrak e},\check{\mathfrak f})$.
There exists a GLA epimorphism $\varphi$
of $\check{\mathfrak m}$ onto $\mathfrak m$ such that the restriction
$\varphi|\check{\mathfrak g}_{-1}$ is the identity mapping.
Since the mapping
$\check{\mathfrak g}_0\ni D\mapsto
(D|\mathfrak e,D|\mathfrak f)\in\operatorname{\mathfrak{gl}}(\mathfrak e)\times\operatorname{\mathfrak{gl}}(\mathfrak f)$
is an isomorphism,
$\varphi$ can be extended to be a homomorphism $\check{\varphi}$
of $\bigoplus\limits_{p\leqq0}\check{\mathfrak g}_{p}$ onto
$\bigoplus\limits_{p\leqq0}\mathfrak g_p$.
Let $\mathfrak a$ be the kernel of $\check{\varphi}$;
then $\mathfrak a$ is a graded ideal of
$\bigoplus\limits_{p\leqq0}\check{\mathfrak g}_{p}$.
We set $\mathfrak a_p
=\mathfrak a\cap \check{\mathfrak g}_{p}$; then
$\mathfrak a=\bigoplus\limits_{p\leqq 0}\mathfrak a_p$.
Since the restriction of $\check{\varphi}$ to
$\check{\mathfrak g}_{-1}\oplus \check{\mathfrak g}_0$ is injective,
$\mathfrak a_p=\{0\}$ for $p\geqq-1$. Also
each $\mathfrak a_p$ is a $\check{\mathfrak g}_0$-submodule of
$\check{\mathfrak g}_p$.
Since the $\check{\mathfrak g}_0$-module
$\check{\mathfrak g}_{-2}$ is irreducible (Proposition~\ref{prop8.3}~(2)),
$\varphi|\mathfrak g_{-2}$ is injective.
If $\mu=2$, then $\varphi$ is an isomorphism. This proves the assertion~(1).
Now we assume that $\mu\geqq3$. Then
\[
\check{\mathfrak g}_{-3}=[[\mathfrak e,\mathfrak f],\mathfrak f]\oplus
[[\mathfrak e,\mathfrak f],\mathfrak e].
\]
Since $\check{\mathfrak g}_0$-modules
$[[\mathfrak e,\mathfrak f],\mathfrak f]$ and
$[[\mathfrak e,\mathfrak f],\mathfrak e]$ are irreducible and not isomorphic
to each other (Proposition \ref{prop8.3} (3)),
one of the following cases occurs:
$(i)$~$\mathfrak a_{-3}=[[\mathfrak e,\mathfrak f],\mathfrak f]$;
$(ii)$~$\mathfrak a_{-3}=[[\mathfrak e,\mathfrak f],\mathfrak e]$;
$(iii)$~$\mathfrak a_{-3}=\{0\}$.
If $\mathfrak a_{-3}=[[\mathfrak e,\mathfrak f],\mathfrak f]$ (resp.\
$\mathfrak a_{-3}=[[\mathfrak e,\mathfrak f],\mathfrak e]$), then
$\mathfrak c=\mathfrak f$
(resp. $\mathfrak c=\mathfrak e$).
Also
since $\mathfrak g_0$-modules
$\mathfrak e$,
$\mathfrak f$ are irreducible and not isomorphic to each other,
one of the following cases occurs:
$(i)$~$\mathfrak c=\mathfrak e$;
$(ii)$~$\mathfrak c=\mathfrak f$;
$(iii)$~$\mathfrak c=\{0\}$.
If $\mathfrak c=\mathfrak e$
(resp. $\mathfrak c=\mathfrak f$),
then $\mathfrak a_{-3}=[[\mathfrak e,\mathfrak f],\mathfrak e]$
(resp. $\mathfrak a_{-3}=[[\mathfrak e,\mathfrak f],\mathfrak f]$).
In this case, $\varphi$ is not injective.
Hence $(\mathfrak m;\mathfrak e,\mathfrak f)$ is not free.
If $\mathfrak c=\{0\}$,
then $\mathfrak a_{-3}=\{0\}$.
Hence $\varphi|\check{\mathfrak g}_{-3}$ is an isomorphism.
In particular, if $\mu=3$, then
$(\mathfrak m;\mathfrak e,\mathfrak f)$ is free.
\end{proof}
\begin{example}
Let $V$ and $W$ be f\/inite-dimensional vector spaces and $k\geqq1$.
We set
\begin{gather*}
\mathfrak C^{k}(V,W) =\bigoplus\limits_{p=-k-1}^{-1}\mathfrak C^{k}(V,W)_p, \\
\mathfrak C^{k}(V,W)_p=W\otimes S^{k+p+1}(V^*), \qquad
-k-1\leqq p\leqq -2, \\
\mathfrak C^{k}(V,W)_{-1} =V\oplus (W\otimes S^{k}(V^*)).
\end{gather*}
The bracket operation of $\mathfrak C^{k}(V,W)$ is def\/ined as follows:
\begin{gather*}
[W,V]=\{0\},\qquad [V,V]=\{0\}, \qquad [W\otimes S^{r}(V^*),W\otimes S^{s}(V^*)]=\{0\}, \\
[w\otimes s_r,v]=w\otimes (v\lrcorner\,s_r)\qquad \text{for}\ v\in V,\ w\in W,
\ s_r\in S^r(V^*).
\end{gather*}
Equipped with this bracket operation, $\mathfrak C^{k}(V,W)$ becomes
a pseudo-product FGLA of the $(k+1)$-th kind
with pseudo-product structure $(V,W\otimes S^{k}(V^*))$,
which is called
{\it the contact algebra of order $k$ of bidegree} $(n,m)$, where
$n=\dim V$ and $m=\dim W$ (cf.~\cite[p.~133]{yam82:1}).
We assume that $\mathfrak C^{k}(V,W)$ is a free pseudo-product FGLA.
Since
\begin{gather*}
\dim \mathfrak C^{k}(V,W)_{-2}=m\binom{n+k-2}{k-1}, \qquad
\dim V\dim (W\otimes S^{k}(V^*))= nm\binom{n+k-1}{k},
\end{gather*}
we get $n=1$.
Since $W\otimes S^{k}(V^*)$ is contained in the centralizer of
$\mathfrak C^{k}(V,W)_{-2}$
in $\mathfrak C^{k}(V,W)_{-1}$, we get $k=1$.
Thus we obtain that $\mathfrak C^{k}(V,W)$ is a free pseudo-product FGLA
if and only if $k=1$, $n=1$.
\end{example}
\begin{example}\label{ex8.2}
Let $\gla g$ be a f\/inite-dimensional simple GLA of type
$(A_{m+n},\{\alpha_m,\alpha_{m+1}\})$.
We set $\mathfrak e=\mathfrak g_{-1}^{(m)}$, $\mathfrak f=\mathfrak g_{-1}^{(m+1)}$. Then $(\mathfrak g_-;\mathfrak e,\mathfrak f)$
is a pseudo-product FGLA.
Since $\dim \mathfrak e=m$, $\dim\mathfrak f=n$ and
$\dim\mathfrak g_{-2}=mn$, the pseudo-product FGLA
$(\mathfrak g_-;\mathfrak e,\mathfrak f)$ is a free pseudo-product FGLA
of type $(m,n,2)$ (Proposition~\ref{prop8.3}~(2)). Also
$\gla g$ is the prolongation of $\mathfrak g_-$ except for the following
cases (see \cite{yam93:1}):
\begin{enumerate}\itemsep=0pt
\item $m=n=1$. In this case,
the prolongation of $\mathfrak g_-$ is isomorphic to $K(1)$.
\item $m=1$ or $n=1$ and $l=\max\{m,n\}\geqq2$. In this case,
the prolongation of $\mathfrak g_-$ is isomorphic to $W(l+1;\bm s)$,
where $\bm s=(1,2,\dots,2)$.
\end{enumerate}
\end{example}
\begin{example}
Let $V$ and $W$ be f\/inite-dimensional vector spaces
such that $\dim V=m\geqq1$ and $\dim W=n\geqq1$.
We set
\begin{gather*}
\mathfrak g_{-1} =V\oplus W, \qquad \mathfrak g_{-2}=V\otimes W, \\
\mathfrak g_{-3} = V\otimes S^2(W)\oplus S^2(V)\otimes W,
\qquad \mathfrak m=\mathfrak g_{-1}\oplus \mathfrak g_{-2}\oplus
\mathfrak g_{-3}.
\end{gather*}
The bracket operation of $\mathfrak m$ is def\/ined as follows:
\begin{gather*}
[\mathfrak g_{-3},\mathfrak g_{-1}\oplus \mathfrak g_{-2}]
=[\mathfrak g_{-2},\mathfrak g_{-2}]=\{0\}, \qquad [V,V]=[W,W]=\{0\},\\
[v,w]=-[w,v]=v\otimes w, \qquad
[v,v'\otimes w]=-[v'\otimes w,v]=v\circledcirc v'\otimes w,
\\ [v\otimes w,w']=-[w',v\otimes w]=v\otimes w\circledcirc w',
\end{gather*}
where $v,v'\in V$ and $w,w'\in W$.
Equipped with this bracket operation, $\mathfrak m$ becomes
a free pseudo-product FGLA of type $(m,n,3)$ with pseudo-product structure
$(V,W)$ (Proposition~\ref{prop8.3}).
\end{example}
\begin{theorem}\label{thm8.1}
Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a free
pseudo-product FGLA of type $(m,n,\mu)$ with
pseudo-product structure $(\mathfrak e,\mathfrak f)$ over $\mathbb C$.
Furthermore let
$\gla g$
$($resp.\
$\mathfrak g(\mathfrak m)
=\bigoplus\limits_{p\in\mathbb Z}\mathfrak g(\mathfrak m)_p)$
be the prolongation of $(\mathfrak m;\mathfrak e,\mathfrak f)$
$($resp.~$\mathfrak m)$.
\begin{enumerate}\itemsep=0pt
\item Assume that $\dim \mathfrak g(\mathfrak m)=\infty$.
Then $m=1$ or $n=1$, and $\mu=2$.
Furthermore $\gla g$ is isomorphic to
a finite-dimensional simple GLA of type $(A_{l+1},\{\alpha_1,\alpha_2\})$,
where $l=\max\{m,n\}$.
If $l=1$, then $\mathfrak g(\mathfrak m)$
is isomorphic to $K(1)$.
If $l\geqq2$, then $\mathfrak g(\mathfrak m)$
is isomorphic to $W(l+1;\bm s)$,
where $\bm s=(1,2,\dots,2)$.
\item
If $\mathfrak g_{1}\!\ne\!\{0\}$, then $\gla g$
is a finite-dimensional simple GLA of type
$(A_{m{+}n},\{\alpha_m,\alpha_{m{+}1}\}).\!$
\end{enumerate}
\end{theorem}
\begin{proof}
(1) For $p\!\geqq\!{-}1$, we put
$\mathfrak h_p
=\{X\in\mathfrak g(\mathfrak m)_p:[X,\mathfrak g_{\leqq -2}]=\{0\}\}$.
Assume that \mbox{$\dim \mathfrak g(\mathfrak m)=\infty$} and $\mu\geqq3$.
By Proposition~\ref{prop8.4}~(2),
$\mathfrak h_{-1}=\{0\}$.
Since $[\mathfrak h_0,\mathfrak g_{-1}]\subset \mathfrak h_{-1}=\{0\}$,
we see that $\mathfrak h_0=\{0\}$.
By \cite[Corollary 1 to Theorem 11.1]{tan70:1}, we obtain that
$\dim\mathfrak g(\mathfrak m)<\infty$,
which is a~contradiction. Thus we see that $\mu=2$ if $\dim \mathfrak g(\mathfrak m)=\infty$. The remaining assertion follows from Example~\ref{ex8.2}.
(2)
Assume that $\mathfrak g_1\ne\{0\}$ and $\mu\geqq3$.
By transitivity of $\mathfrak g$,
$[\mathfrak g_1,\mathfrak e]\ne\{0\}$ or $[\mathfrak g_1,\mathfrak f]\ne\{0\}$.
We may assume that $[\mathfrak g_1,\mathfrak e]\ne\{0\}$.
Then there exists an irreducible component $\mathfrak g'_1$ of
the $\mathfrak g_0$-module~$\mathfrak g_1$
such that $[\mathfrak g'_1,\mathfrak e]\ne\{0\}$ and
$[\mathfrak g'_1,\mathfrak f]=\{0\}$.
The subalgebra $\mathfrak e\oplus[\mathfrak e,\mathfrak g'_1]\oplus
\mathfrak g'_1$ is a simple GLA of the f\/irst kind.
Since $\mathfrak g_0$ is isomorphic to $\mathfrak{gl}(\mathfrak e)\oplus
\mathfrak{gl}(\mathfrak f)$,
$\mathfrak e\oplus[\mathfrak e,\mathfrak g'_1]\oplus \mathfrak g'_1$ is of type $(A_m,\{\alpha_1\})$.
Let~$D$ be a nonzero element of $\mathfrak g'_1$.
There exist $\lambda\in \mathfrak e^*$ and $\eta\in\mathfrak f^*$ such that
\[
[[D,Z],U]=\lambda(U)Z+\lambda(Z)U, \qquad [[D,Z],W]=\eta(Z)W,
\]
where $Z,U\in \mathfrak e$ and $W\in \mathfrak f$
(cf.~\cite[p.~4]{tan57:1}).
Let $X$ (resp.~$Y$) be a nonzero element of $\mathfrak e$
(resp. $\mathfrak f$).
Since the subalgebra generated by $X,Y$ is a free FGLA of type $(2,\mu)$
(Remark~\ref{rem8.1}~(2)),
\begin{gather*}
\operatorname{ad}(X)^\mu(Y)=0, \qquad \operatorname{ad}(X)^{\mu-1}(Y)\ne 0, \\
\operatorname{ad}(Y)\operatorname{ad}(X)^{\mu-1}(Y)=0, \qquad \operatorname{ad}(Y)\operatorname{ad}(X)^{\mu-2}(Y)\ne 0
\end{gather*}
(Lemma \ref{lem2.1}).
By induction on $\mu$, we see that
\begin{gather*}
0 = \operatorname{ad}(D)\operatorname{ad}(X)^\mu(Y)=(\mu(\mu-1)\lambda(X)+\mu\eta(X))\operatorname{ad}(X)^{\mu-1}(Y),
\\
0 = \operatorname{ad}(D)\operatorname{ad}(Y)\operatorname{ad}(X)^{\mu-1}(Y)\\
\hphantom{0}{} =((\mu-1)(\mu-2)\lambda(X)
+(\mu-1)\eta(X))\operatorname{ad}(Y)\operatorname{ad}(X)^{\mu-2}(Y).
\end{gather*}
Since
\[
\det\begin{bmatrix}
\mu(\mu-1) & \mu \\
(\mu-1)(\mu-2) & \mu-1
\end{bmatrix}=
\mu(\mu-1)\ne0,
\]
we see that $\lambda(X)=\eta(X)=0$, which is a contradiction.
Thus we obtain that $\mu=2$ if \mbox{$\mathfrak g_1\ne\{0\}$}.
From Example~\ref{ex8.2}, it follows that
$\gla g$ is a simple GLA of type $(A_{m+n},\{\alpha_m,\alpha_{m+1}\})$
if $\mathfrak g_1\ne\{0\}$.
\end{proof}
\section{Automorphism groups of the prolongations\\ of free pseudo-product
fundamental graded Lie algebras}\label{section9}
For a GLA $\gla g$
we denote by $\operatorname{Aut}(\mathfrak g)_0$ the group of all the automorphisms of
$\mathfrak g$ preserving the gradation of $\mathfrak g$:
\[
\operatorname{Aut}(\mathfrak g)_0=\{\varphi\in\operatorname{Aut}(\mathfrak g):\varphi(\mathfrak g_p)=
\mathfrak g_p \ \text{for all}\ p\in\mathbb Z\}.
\]
\begin{proposition}\label{prop9.1} Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be
an FGLA and let $\mathfrak g(\mathfrak m)
=\bigoplus\limits_{p\in\mathbb Z}\mathfrak g(\mathfrak m)_p$
be the prolongation of~$\mathfrak m$.
The mapping
$\Phi:\operatorname{Aut}(\mathfrak g(\mathfrak m))_0\ni\phi\mapsto
\phi|\mathfrak m\in\operatorname{Aut}(\mathfrak m)_0$ is an isomorphism.
\end{proposition}
\begin{proof}
It is clear that $\Phi$ is a group homomorphism.
We prove that $\Phi$ is injective.
Let $\phi$ be an element of $\operatorname{Ker}\Phi$.
Assume that $\phi(X)=X$ for all $X\in \mathfrak g(\mathfrak m)_p$ $(p<k)$.
For $X\in\mathfrak g(\mathfrak m)_k$, $Y\in\mathfrak g_{-1}$,
\[
[\phi(X)-X,Y]=\phi([X,Y])-[X,Y].
\]
Since $[X,Y]\in\mathfrak g(\mathfrak m)_{k-1}$, we have $[\phi(X)-X,Y]=0$.
By transitivity, $\phi(X)=X$.
By induction, we have proved $\phi$ to be the identity mapping.
Hence $\Phi$ is a monomorphism.
We prove that $\Phi$ is surjective.
Let $\varphi\in\operatorname{Aut}(\mathfrak m)_0$.
We construct the mapping
$\varphi_p:\mathfrak g(\mathfrak m)_p\to\mathfrak g(\mathfrak m)_p$
inductively as follows:
First for $X\in\mathfrak g(\mathfrak m)_0$, we set
$\varphi_0(X)=\varphi X\varphi^{-1}$.
Then for $Y,Z\in \mathfrak m$
\[
\varphi_0(X)([Y,Z])=[\varphi(X(\varphi^{-1}(Y))),Z]
+[Y,\varphi(X(\varphi^{-1}(Z)))],
\]
so $\varphi_0(X)\in\mathfrak g(\mathfrak m)_0$.
Furthermore we can prove easily that
$[\varphi_0(X),\varphi_p(Y)]=\varphi_p([X,Y])$ for
$X\in\mathfrak g_0$ and $Y\in \mathfrak g_p$ $(p\leqq 0)$.
Here for $p<0$ we set $\varphi_p=\varphi|\mathfrak g(\mathfrak m)_p$.
Assume that we have def\/ined linear isomorphisms $\varphi_p$ of
$\mathfrak g(\mathfrak m)_p$ onto itself $(0\leqq p<k)$ such that
\[
\varphi_{r+s}([X,Y])=[\varphi_r(X),\varphi_s(Y)]
\]
for $X\in\mathfrak g(\mathfrak m)_r$, $Y\in\mathfrak g(\mathfrak m)_s$
$(r+s<k$, $r<k$, $s<k)$.
For $X\in\mathfrak g(\mathfrak m)_k$ we def\/ine $\varphi_k(X)\in
\operatorname{Hom}(\mathfrak m,\bigoplus\limits_{p\leqq k-1}\mathfrak g(\mathfrak m)_p)_k$
as follows:
\[
\varphi_k(X)(Y)=\varphi_{k+s}([X,\varphi^{-1}(Y)]), \qquad Y\in\mathfrak g_s, \ s<0.
\]
For $Y\in\mathfrak g_s$, $Z\in\mathfrak g_t$ $(s,t<0)$,
\begin{gather*}
\varphi_k(X)([Y,Z]) =\varphi_{k+s+t}([X,\varphi^{-1}([Y,Z])]) \\
\hphantom{\varphi_k(X)([Y,Z])}{}
=\varphi_{k+s+t}([[X,\varphi^{-1}(Y)],\varphi^{-1}(Z)]
+[\varphi^{-1}(Y),[X,\varphi^{-1}(Z)]]) \\
\hphantom{\varphi_k(X)([Y,Z])}{}
=[\varphi_{k+s}([X,\varphi^{-1}(Y)]),Z]
+[Y,\varphi_{k+t}([X,\varphi^{-1}(Z)])] \\
\hphantom{\varphi_k(X)([Y,Z])}{}
=[\varphi_k(X)(Y),Z]+[Y,\varphi_k(X)(Z)],
\end{gather*}
so $\varphi_k(X)\in\mathfrak g(\mathfrak m)_k$.
Next we prove that for $X\in\mathfrak g_p$, $Y\in\mathfrak g_q$ $(p+q=k$,
$0\leqq p\leqq k$, $0\leqq q\leqq k)$,
\[
\varphi_k([X,Y])=[\varphi_p(X),\varphi_q(Y)].
\]
For $Z\in\mathfrak g_s$ $(s<0)$,
\begin{gather*}
[[\varphi_p(X),\varphi_q(Y)],Z]
=[\varphi_p(X),[\varphi_q(Y),Z]]-[\varphi_q(Y),[\varphi_p(X),Z]] \\
\hphantom{[[\varphi_p(X),\varphi_q(Y)],Z]}{}
=\varphi_{p+q+s}([X,[Y,\varphi^{-1}(Z)]]-[Y,[X,\varphi^{-1}(Z)]]) \\
\hphantom{[[\varphi_p(X),\varphi_q(Y)],Z]}{}
=\varphi_{p+q+s}([[X,Y],\varphi^{-1}(Z)])
=[\varphi_k([X,Y]),Z].
\end{gather*}
By transitivity, we see that
$\varphi_k([X,Y])=[\varphi_p(X),\varphi_q(Y)]$.
We def\/ine a mapping $\check{\varphi}$ of $\mathfrak g(\mathfrak m)$ into
itself as follows:
\[\check{\varphi}(X)=
\begin{cases}
\varphi(X), & X\in\mathfrak m, \\
\varphi_k(X), & k\geqq0, \ X\in\mathfrak g(\mathfrak m)_k.
\end{cases}
\]
From the above results and the def\/inition of $\varphi_k$ $(k\geqq0)$,
we see that $\check{\varphi}$ is a GLA homomorphism.
Assume that $\varphi_{k-1}$ $(k\geqq0)$ is a linear isomorphism.
For $X\in \mathfrak g(\mathfrak m)_k$, if $\varphi_k(X)=0$, then
$0=[\varphi_k(X),Y]=\varphi_{k-1}([X,\varphi^{-1}(Y)])$ for all
$Y\in\mathfrak g_{-1}$. By transitivity, we see that $X=0$,
so $\varphi_k$ is a linear isomorphism.
Therefore $\check{\varphi}$ is an automorphism of $\mathfrak g(\mathfrak m)$.
\end{proof}
\begin{theorem}\label{thm9.1} Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a
free FGLA over $\mathbb C$, and let
$\mathfrak g(\mathfrak m)
=\bigoplus\limits_{p\in\mathbb Z}\mathfrak g(\mathfrak m)_p$
be the prolongation of $\mathfrak m$.
The mapping $\Phi:\operatorname{Aut}(\mathfrak g(\mathfrak m))_0\ni\phi\mapsto \phi|\mathfrak g_{-1}\in GL(\mathfrak g_{-1})$ is an isomorphism.
\end{theorem}
\begin{proof}
We may assume that $\mathfrak m$ is a universal FGLA $b(\mathfrak g_{-1},\mu)$ of the $\mu$-th kind.
By Corollary~1 to Proposition~3.2 of~\cite{tan70:1},
the mapping $\operatorname{Aut}(\mathfrak m)_0\ni a\mapsto a|\mathfrak g_{-1}\in
GL(\mathfrak g_{-1})$ is an isomorphism.
By Proposition~\ref{prop9.1}, we see that the mapping
$\Phi:\operatorname{Aut}(\mathfrak g(\mathfrak m))_0\ni\phi\mapsto \phi|\mathfrak g_{-1}\in GL(\mathfrak g_{-1})$ is an isomorphism.
\end{proof}
For a pseudo-product GLA $\gla g$ with pseudo-product structure
$(\mathfrak e,\mathfrak f)$,
we denote by $\operatorname{Aut}(\mathfrak g;\mathfrak e,\mathfrak f)_0$ the group of all the automorphisms of
$\mathfrak g$ preserving the gradation of $\mathfrak g$, $\mathfrak e$
and $\mathfrak f$:
\[
\operatorname{Aut}(\mathfrak g;\mathfrak e,\mathfrak f)_0
=\{\varphi\in\operatorname{Aut}(\mathfrak g)_0:
\varphi(\mathfrak e)=\mathfrak e,\varphi(\mathfrak f)=\mathfrak f
\}.
\]
\begin{theorem}\label{thm9.2} Let $\mathfrak m=\bigoplus\limits_{p<0}\mathfrak g_p$ be a
free pseudo-product FGLA of type $(m,n,\mu)$ with pseudo-product structure
$(\mathfrak e,\mathfrak f)$ over $\mathbb C$, and let
$\mathfrak g
=\bigoplus\limits_{p\in\mathbb Z}\mathfrak g_p$
be the prolongation of $(\mathfrak m;\mathfrak e,\mathfrak f)$.
The mapping $\Phi:\operatorname{Aut}(\mathfrak g;\mathfrak e,\mathfrak f)_0\ni\phi\mapsto
(\phi|\mathfrak e,\phi|\mathfrak f)\in GL(\mathfrak e)\times GL(\mathfrak f)$
is an isomorphism.
Furthermore if $\dim\mathfrak e\ne\dim \mathfrak f$,
then $\operatorname{Aut}(\mathfrak g;\mathfrak e,\mathfrak f)_0=\operatorname{Aut}(\mathfrak g)_0$.
\end{theorem}
\begin{proof}
Clearly $\Phi$ is a monomorphism.
We show that $\Phi$ is surjective.
Let $(\phi_1,\phi_2)$ be an element of
$GL(\mathfrak e)\times GL(\mathfrak f)$.
We set $\phi=\phi_1\oplus\phi_2\in GL(\mathfrak g_{-1})$.
By Corollary~1 to Proposition~3.2 of \cite{tan70:1},
there exists an element
$\varphi_1\in \operatorname{Aut}(b(\mathfrak g_{-1},\mu))_0$ such that
$\varphi_1|\mathfrak g_{-1}=\phi$.
Since $\varphi_1([\mathfrak e,\mathfrak e]+[\mathfrak f,\mathfrak f])=
[\mathfrak e,\mathfrak e]+[\mathfrak f,\mathfrak f]$,
$\varphi_1$ induces an element
$\varphi_2\in \operatorname{Aut}(\mathfrak m;\mathfrak e,\mathfrak f)_0$
such that $\varphi_2|\mathfrak g_{-1}=\phi$.
By Proposition \ref{prop9.1}, there exists
$\varphi_3\in \operatorname{Aut}(\mathfrak g(\mathfrak m))_0$
such that $\varphi_3|\mathfrak m=\varphi_2$.
We prove that $\varphi_3(\mathfrak g)=\mathfrak g$.
For $X_0\in\mathfrak g_0$ and $Y\in\mathfrak e$,
we see that $[\varphi_3(X_0),Y]=\varphi_3([X_0,\varphi_3^{-1}(Y)])\in
\varphi_3(\mathfrak e)=\mathfrak e$, so
$\varphi_3(X_0)(\mathfrak e)\subset \mathfrak e$.
Similarly we get $\varphi_3(X_0)(\mathfrak f)\subset \mathfrak f$.
Thus we obtain that $\varphi_3(\mathfrak g_0)=\mathfrak g_0$.
Now we assume that $\varphi_3(\mathfrak g_i)=\mathfrak g_i$
for $0\leqq i\leqq k$. Then
for $X_{k+1}\in\mathfrak g_{k+1}$ and $Y\in\mathfrak g_{p}$ $(p<0)$,
we see that $[\varphi_3(X_{k+1}),Y]=\varphi_3([X_{k+1},\varphi_3^{-1}(Y)])\in
\varphi_3(\mathfrak g_{p+k+1})=\mathfrak g_{p+k+1}$, so
$\varphi_3(\mathfrak g_{k+1})\subset \mathfrak g_{k+1}$.
Hence $\varphi_3(\mathfrak g)=\mathfrak g$ and $\Phi$ is surjective.
Now we assume that $\dim\mathfrak e\ne\dim \mathfrak f$.
Let $\varphi\in\operatorname{Aut}(\mathfrak g)_0$.
Since $\mathfrak g_0$-modules $\mathfrak e$ and $\mathfrak f$ are not
isomorphic to each other, we see that
$(i)$ $\varphi(\mathfrak e)=\mathfrak e$, $\varphi(\mathfrak f)=\mathfrak f$ or
$(ii)$ $\varphi(\mathfrak e)=\mathfrak f$, $\varphi(\mathfrak f)=\mathfrak e$.
According to the assumption $\dim\mathfrak e\ne\dim\mathfrak f$,
we get $\varphi(\mathfrak e)=\mathfrak e$, $\varphi(\mathfrak f)=\mathfrak f$,
so $\varphi\in \operatorname{Aut}(\mathfrak g;\mathfrak e,\mathfrak f)_0$.
\end{proof}
\pdfbookmark[1]{References}{ref}
|
{
"timestamp": "2012-06-28T02:01:40",
"yymm": "1206",
"arxiv_id": "1206.6173",
"language": "en",
"url": "https://arxiv.org/abs/1206.6173"
}
|
\section{Introduction}
\label{sec:intro}
Complex networks are realistic substrates for simulating
many social and natural phenomena.
To address the influence of network topology,
primarily, different classes of degree distributions $P(k)$
can be considered.
Meanwhile, for a given distribution of degrees, correlations may
give rise to important network structure effects on the studied
process~\cite{vazquez,small,optimizing,epidemic,makse,threshold}.
These structural effects may have important consequences, for instance,
correlations may shift the epidemic threshold~\cite{threshold}.
Although correlation effects may be absent in some cases \cite{absence},
in others they cannot be neglected.
Although there are efficient algorithms to generate networks with fixed
degree-degree correlations \cite{pusch}, real joint probabilities of two or more degrees measured
in networks of moderate size may be noisy and hard to be modeled.
Then, operationally, average nearest-neighbors degree distributions \cite{asymptotic}
or single quantity measures are used.
Although other variants have been defined in the literature \cite{mixing,reshuffling},
as quantifier of the tendency of adjacent vertices to have
similar or dissimilar degrees,
we will consider the standard measure of (linear) degree-degree correlations,
namely, the assortativity (Pearson) coefficient \cite{newman_assor}
\begin{equation} \label{def_r}
r=\frac{\langle kk' \rangle_e - \langle k\rangle^2_e }{\langle k^2 \rangle_e - \langle k\rangle^2_e},
\end{equation}
where $\langle \cdots \rangle_e$ denotes average over edges and $k$ and $k'$ are
the degrees of vertices at each end of an edge.
Although this coefficient is known to present some drawbacks \cite{cap3}, it is a
standard and commonly used quantity, and hence worth analyzing.
Moreover, it has the advantage of
being a single-valued measure, which is easier to control than other multi-valued quantities.
To analyze the influence of correlations, as well as of any other structural feature,
it is useful to build ensembles of networks holding that property,
while keeping fixed the sequence of degrees.
As will be described in Section~\ref{sec:ensembles}, this kind of ensemble can be
achieved by means of a suitable rewiring,
performed through a standard simulated annealing Monte Carlo (MC) procedure to minimize
a given energy-like quantity (maximum entropy ensemble approach),
function of the graph property to be controlled ($r$ in our case)~\cite{park,foster,noh}.
Once $r$ is tuned, it is important to characterize how other
network properties are altered as a byproduct.
%
Some interdependencies among certain network properties have already been
numerically shown in the literature,
for real as well as for artificial graphs \cite{foster}.
Analytical relations have also been derived \cite{estrada,serrano2,dorogovtsev}.
Because of its crucial role in spreading phenomena \cite{newman2001},
we will focus here on the effect of $r$ over typical distance measures
as well as on the branching and transitivity of links.
As a measure of the average separation between nodes,
we consider the average path length \cite{WS}.
In the subsequent calculations we use the expression,
\begin{equation}
L= \frac{\sum_{i=1}^n \langle L_i \rangle N_i(N_i-1)}{\sum_{i=1}^n N_i(N_i-1)} \, ,
\end{equation}
where $n$ is the number of (disconnected) clusters and $N_i$ is the number of nodes in cluster $i$.
Moreover, being $d_{kj}$ the distance (number of edges along the shortest path)
between nodes $k$ and $j$ (taking $d_{kj}=0$ if the nodes do not belong to the same cluster),
then
\begin{equation}
\langle L_i \rangle = \frac{\sum_{j,k=1}^{N_i} d_{kj}}{ N_i(N_i-1)} \,.
\end{equation}
Alternatively, in order to avoid the issue of the divergence of the distance
between disconnected nodes, we consider
the inverse, $1/E$, of the so-called efficiency \cite{latora}
\begin{equation}
E=\frac{1}{N(N-1)} \sum_{ \substack{1\le i,j \le N \\ i\neq j }}\frac{1}{d_{ij}} \,,
\end{equation}
where $N$ is the number of nodes.
It represents a harmonic mean instead of the arithmetic one.
We also compute the diameter $D=\mbox{max}\{d_{ij}\}$.
The transitivity of links can be measured by the clustering coefficient \cite{barrat,newman_clus}
\begin{equation} \label{ntriangle}
C = \frac{6 n_\triangle}{ \sum_{i=1}^{N} k_i(k_i-1) } \,,
\end{equation}
where $n_\triangle$ is the number of triangles and $k_i$ is the degree of node $i$.
We also considered the mean value, $\bar{C}$, of the local clustering coefficient $C_i$,
defined as $C_i=2 e_i/(k_i(k_i-1))$, where $e_i$ is the number of connections between the
neighbors of vertex $i$ \cite{WS}. We took $C_i=0$ when $k_i=0$ or 1.
Other measures that arise in the decomposition of $r$ \cite{estrada} will also be considered.
Besides detecting interdependencies among structural properties, it is also important to know
how these properties depend on the system size $N$.
We will analyze these issues for two main classes of degree distributions
(Poisson and power-law tailed).
We will also investigate real networks degree sequences.
\section{Networks and ensembles}
\label{sec:ensembles}
For each class of networks, we will consider different values of the size, $N$,
and the mean degree, $\langle k \rangle$, within realistic ranges.
As a paradigm of the class of networks with a peaked distribution of degrees,
with all its moments finite,
we consider the random network of Erd\H{o}s and R\'enyi \cite{ER}. Within this
model, a network with $N$ nodes is assembled by selecting $M$ different pairs
of nodes at random and linking each pair.
The resulting distribution of links is the Poisson distribution
$P(k) = e^{-\langle k \rangle}\langle k \rangle^k/k!$,
where the mean degree is $\langle k \rangle = 2M/N$.
We also analyze networks of the power-law type, i.e., with $P(k)\sim k^{-\gamma}$, $\gamma >2$,
corresponding to a wide distribution of degrees, with power-law tails.
Then, moments of order $n\ge \gamma-1$ are divergent.
We built power-law networks by means of the configuration model \cite{CM}.
Following this procedure, one starts by choosing
$N$ random numbers $k$, drawn from the degree distribution $P(k)$.
They represent the number of edges coming out from each node, where these
edges have one end attached to the node and another still open.
Second, two open ends are randomly chosen and connected such that, although
multiple connections are allowed, self connections are not.
This second stage is repeated until each node attains the connectivity attributed
in the first step. If eventually an edge has an open end, then it is discarded.
However, for large networks, the fraction of discarded edges is negligible.
To draw the set of numbers $k$ with probability $P(k)= {\cal N} k^{-\gamma}$,
with $k_{min} \le k \le k_{max}$
(hence the normalization factor is ${\cal N} = 1/\sum_{k_{min}}^{k_{max}} k^{-\gamma}$),
we used the inverse transform algorithm \cite{transform}.
Notice that $k_{max}\le N-1$ and $k_{max}\gg k_{min}$;
then we determined $k_{min}$ to fit the selected value of $\langle k \rangle$
(within a tolerance of at most 1\%), such that
\begin{equation} \label{k1}
\langle k \rangle =\frac{\sum_{k_{min}}^{k_{max}} k^{-\gamma+1}}{\sum_{k_{min}}^{k_{max}} k^{-\gamma}} \simeq
\frac{\gamma-1}{\gamma-2} \,
\frac{k_{max}^{2-\gamma}- k_{min}^{2-\gamma} }{k_{max}^{1-\gamma}- k_{min}^{1-\gamma}}
\simeq \frac{\gamma-1}{\gamma-2} k_{min}.
\end{equation}
It is worth mentioning that the value $k=N-1$ is not usually achieved,
the natural cut-off being $k_c\sim N^\frac{1}{\gamma-1}$ \cite{Dorogovtsev_cutoff}.
In order to attain a desired value of $r$, we follow a standard rewiring
approach.
We want to build an ensemble of networks \{G\} with a given value of $r$
($r$-ensemble) but that are
maximally random in other aspects, i.e.,
making as few assumptions as possible about
the distribution $P(G)$. Then, we use an exponential random graph model,
such that the set of networks \{G\} has distribution
$P(G)\propto {\rm e}^{-H(G)}$, where $H(G)$ is a Hamiltonian or energy-like quantity \cite{park}.
In order to get an $r$-ensemble, with $r=r_\star$,
we consider \cite{foster}
\begin{equation} \label{hg}
H(G)=\beta|r-r_\star|\,,
\end{equation}
where $\beta$ is a real parameter.
The ensemble can be simulated by means of a MC procedure:
at each step, a rewiring attempt is accepted with probability
$\mbox{min}\{1, e^{-[H(G')-H(G)]}\}$.
Rewiring steps are performed by randomly selecting two edges that
connect the vertices $a$, $b$ and $c$, $d$, respectively, and substituting those two
links by new ones connecting $a$, $c$ and $b$, $d$ \cite{rewiring}.
Movements yielding double links are forbidden. Notice that this process preserves the
connectivity of each node.
We start the simulation by taking $\beta=0$ [during at most 100 MC steps (MCS),
where each MCS corresponds to $N$ attempts].
The effect of this stage is basically to destroy multiple edges.
We did not notice any clear hysteresis effect
like those observed when controlling, instead, the number of triangles
with a different Hamiltonian \cite{hysteresis}.
Subsequently, $\beta$ is increased (in increments $\Delta \beta =1000$), at each 50 MCS,
until $r$ stabilizes, typically attaining the prescribed value $r_\star$.
Then, the quantities of interest are calculated and
the whole process is repeated, starting with a new degree sequence.
For power-law degree distributions, we observed that the process is non-ergodic,
hence we computed sample mean and standard deviation over 100 realizations
of the described protocol.
We checked that the choice of other expressions for $H(G)$, vanishing at $r_\star$,
did not significantly affect the results but just the convergence time.
\section{Results}
\label{sec:results}
Let us start by reporting the effects of $r$ on the clustering coefficient $C$.
For the Poisson case, we depict in Fig.~\ref{fig:ERC}(a) the behavior
of $C$ as a function of $r$ for a fixed number of nodes ($N=8000$)
and different values of the mean connectivity $\langle k\rangle$.
Very small values of $C$ emerge. The transitivity $C$ monotonically increases with $r$.
This is consistent with the results of Ref. \cite{foster} (restricted to $r\ge0$)
for such kind of networks.
We observe two regimes with a crossover at $r\simeq 0.5$:
a very slight increase with $r$ below the crossover and a
more pronounced one in the region above it.
The existence of two regimes could be related to the asymmetric character of $r$, which
does not measure assortativity and disassortativity on the same grounds.
Below the crossover, $C$ linearly increases with $\langle k \rangle$ about one order of
magnitude within the
analyzed range.
Meanwhile, above the crossover,
$C$ remains of the same order when the average connectivity increases, even for
small $\langle k \rangle$ (also see the inset of Fig.~\ref{fig:ERC}(a)
where $C$ is plotted vs $\langle k \rangle$ for selected
values of $r$). In Sec. \ref{sec:final}, we will discuss these issues in more detail.
For the mean local clustering coefficient $\bar{C}$,
we obtained a qualitatively similar dependence on $r$ to that observed for the clustering coefficient $C$. However, the increase of $\bar{C}$ with $\langle k \rangle$ is linear for any fixed $r$. For $r=0$, $C=\bar{C}=\langle k\rangle/N$, as expected.
In Fig.~\ref{fig:ERC}(b), size effects are exhibited for $\langle k\rangle=4$,
representative of the other values considered.
As the number of nodes increases, $C$ decays as
$C\sim 1/N$ for all $r$ (as depicted in the inset).
Therefore, in the $r$-ensemble of Poisson networks,
transitivity is only a finite-size effect and
vanishes in the infinite network (thermodynamic) limit with
the same asymptotic law $C\sim 1/N$ as
for an uncorrelated random graph \cite{APC}.
\begin{figure}[t!]
\includegraphics[width=0.49\textwidth]{fig1a.pdf}
\includegraphics[width=0.49\textwidth]{fig1b.pdf}
\caption{\label{fig:ERC}
Clustering coefficient $C$ as a function of $r$ for Poisson networks:
(a) $N=8000$ and different values
of $\langle k\rangle$ indicated on the figure.
The graph shows a monotonic increase of $C$ with $r$.
There are two regimes: a very slight increase of $C$ with $r$ below $r\simeq 0.5$ and a
more pronounced one above it.
(b) $\langle k\rangle=4$ and different number of nodes $N$, also indicated on
the figure. As the number of nodes increases, $C$ decays with
the asymptotic law $C\sim 1/N$, characteristic
of uncorrelated random graphs. Standard errors are about 10\%.
Dotted lines are a guide to the eye.
The insets show $C$ vs $\langle k \rangle$ (a) and $N$ (b) for selected
values of $r$ (-0.6, 0.0, 0.6 and 0.8).
}
\end{figure}
\begin{figure}[b!]
\includegraphics[width=0.49\textwidth]{fig2a.pdf}
\includegraphics[width=0.49\textwidth]{fig2b.pdf}
\caption{\label{fig:rlim}
The range of allowed values of $r$ is restricted for the power-law class:
(a) Time evolution of $r$, after setting $r_\star=1$ (-1) to obtain $r_{max}$ ($r_{min}$),
for networks with power-law degree distribution (with $\gamma=3.5$ and $N=5000$).
Shown are 8 individual samples (thin lines) and their respective averages (thick lines).
(b) Average extreme values [$r_{max}$ (open symbols) and $r_{min}$ (filled symbols),
standard errors are at most 50\%] vs
system size $N$ for different values of $\gamma$ indicated on the figure.
Dotted and dashed lines are guides to the eye for $r_{max}$ and $r_{min}$, respectively.
In all cases $\langle k \rangle = 4.00 \pm 0.04$.
For a given size, the allowed interval of $r$ is narrower for lower $\gamma$.
}
\end{figure}
For the power-law class, the range of allowed values of $r$ is restricted.
That is, values of $r$ arbitrarily different from zero cannot be attained in
typical realizations of the MC protocol described in Sec. \ref{sec:ensembles}.
In order to determine the typical maximal (minimal) values,
$r_{max}$ ($r_{min}$),
we imposed $r_\star= 1$ (-1) and detected the stationary values of $r$.
The time evolution of $r$ for $r_\star= 1$ (-1) is illustrated in Fig.~\ref{fig:rlim}(a)
for $\gamma=3.5$, $N=5000$ and $\langle k \rangle \simeq 4$.
Notice the large deviations amongst the steady values of different realizations mainly for the upper bound.
We verified that this picture does not change by implementing other definitions of $H(G)$ in Eq.~(\ref{hg}),
e.g., $\beta|r-r_\star|^\alpha$, with $\alpha \neq 1$.
Average extreme values (over 100 samples, after $3\times 10^4$ MCS)
are displayed in Fig.~\ref{fig:rlim}(b),
as a function of the system size, for different values of $\gamma$.
For fixed size, the lower $\gamma$, the narrower the allowed interval of $r$.
In fact, in networks constrained to a given degree sequence,
structural limitations (or correlations) arise:
either multiple connections or disassortative two-vertex correlations \cite{newman03}.
For instance, the exclusion of multiple connections hampers the natural tendency of hubs to connect among themselves,
hence diminishing the assortativity. This effect is more pronounced the smaller $\gamma$ is.
For fixed $\gamma$, the interval shrinks with system size, for $\gamma<3$, due to the divergence of fluctuations in the large $N$ limit \cite{cap3}.
In Ref. \cite{asymptotic}, similar restriction was also observed
for $2<\gamma <3$, although instead of the average connectivity,
$k_{min}$ was kept constant ($k_{min}=6$).
In that case, it was reported that the upper and lower bounds
both tend to zero, hence $r\to 0$ in the infinite network limit.
In fact, we observe that in that interval of $\gamma$ (e.g., $\gamma=2.5$)
both bounds are negative and as $N$ increases the allowed
interval collapses to a negative value that tends to zero.
We noticed restriction in the correlation bounds for $\gamma>3$ too.
As $N$ grows, the lower bound also increases towards zero
or at least to a small finite value.
Simultaneously, the upper bound seems more stable,
however, its asymptotic behavior is not yet clear, even having considered up to $N>10^5$. Moreover, as $N$ increases, it takes longer to attain steady states.
The allowed interval of $r$ is quite restricted for scale-free networks.
However, we still analyzed systematically cases with $\gamma>3$
($\gamma=3.5$, $4.0$ and $4.5$), yielding finite second moment.
Even in this range, the accessible interval of $r$ is limited,
so we proceeded as follows.
If the desired $r_\star$ is not attained,
within a tolerance of $10^{-3}$, in $2\times 10^4$ MCS, that instance is
discarded and we make a new trial.
If we did not attain 100 successes in 200 trials, the procedure is interrupted.
Alternatively to the configuration model, we also started from networks generated by
preferential attachment \cite{PA}, yielding similar results.
\begin{figure}[h!]
\includegraphics[width=0.49\textwidth]{fig3a.pdf}
\includegraphics[width=0.49\textwidth]{fig3b.pdf}
\caption{\label{fig:SFC}
Clustering coefficient $C$ as a function of $r$, as in
Fig.~\ref{fig:ERC} but for power-law networks with $\gamma=4.0$:
(a) $N=8000$ and different values of $\langle k\rangle$.
In the assortative region, $C$ reaches larger values than in the Poisson class.
(b) $\langle k\rangle=4$ and different number of nodes $N$.
Note that for assortative networks a finite degree of clustering seems to persist for large networks.
Standard errors reach 50\% for the lowest values of $C$.
In the inset, missing values are due to the limitation in attaining the
prescribed values of $r$.
}
\end{figure}
The outcomes for the power-law class with $\gamma=4.0$ are displayed in Fig.~\ref{fig:SFC}.
Standard errors are larger in the power-law case, likely due to the variability in the tails
of the distribution of links from sample to sample.
Outcomes for the other two values of $\gamma$ studied (3.5 and 4.5)
display features similar to those of the case $\gamma=4.0$ used
as illustrative example, despite the third moment becoming divergent at $\gamma=4.0$.
Two regimes are also observed, with the crossover now closer to $r=0$,
but some qualitative differences appear in comparison to the Poisson case.
$C$ rapidly increases with $r$,
attaining, for assortative networks, larger values than in the Poisson class.
These large values are due to the inclusion of highly connected nodes, absent in the Poisson
networks, that for large $r_\star$ tend to gain links among them, contributing
strongly to $r$ and also to $C$.
With respect to finite size effects, below the crossover
the small non-null $C$ is again due only to the finite size of the network.
However, in the assortative region (above the crossover),
it seems that a finite degree of clustering persists for large networks
(see inset of Fig.~\ref{fig:SFC}(b)), in contrast to the Poisson case and to the
disassortative region.
In fact, notice that, when $N$
increases one order of magnitude, $C$ decreases also one order of magnitude
in the disassortative region, while $C$ remains of the same order in the assortative interval.
Even if $C$ vanished in the infinite size limit, since the decay is very slow,
then an effective clustering would remain in moderate, or even large, size networks.
We will discuss the interplay between $C$ and $r$ further in Sec. \ref{sec:final}.
For the mean local clustering coefficient $\bar{C}$,
a qualitatively similar dependence on $r$ is observed,
but with smaller values.
Moreover, $\bar{C}$ increases linearly with $\langle k \rangle$,
in the analyzed range, for any fixed $r$, not only for disassortative networks,
and $\bar{C}$ decays with $N$ for any $r$. For $r=0$, $C=\bar{C}=(\langle k^2\rangle - \langle k\rangle)^2/(N\langle k \rangle^3)$ \cite{dorogovtsev}, as expected.
Let us analyze now the influence of $r$ on network characteristic lengths.
The dependency of the measures $1/E$, $L$ and $D$ on $r$ is depicted in
Fig.~\ref{fig:distances}, for Poisson and power-law distributed networks, with $N=8000$ and $\langle k \rangle \simeq 4$.
$1/E$ and $L$ have close values, systematically shifted.
In first approximation both types of network yield similar values of $1/E$ (hence also $L$),
given $N$ and $\langle k \rangle$.
However, the diameter $D$ is more dependent on the type of network. It is larger and
is more strongly affected by $r$ in the homogeneous Poisson case.
\begin{figure}[h!]
\includegraphics[width=0.52\textwidth]{fig4.pdf}\caption{\label{fig:distances}
Distance measures $1/E$, $L$ and diameter $D$, as a function of $r$ for
Poisson (open symbols) and $\gamma=4.0$ power-law (filled symbols) networks.
In all cases $N=8000$ and $\langle k\rangle =4.00 \pm 0.04$.
The inset is a zoom of the main plot. At first sight,
the two types of network display similar values of $1/E$ and $L$,
for a given $N$ and $\langle k \rangle$.
The diameter is more sensitive to the type of network and is more
affected by $r$ in the Poisson case.
}
\end{figure}
Plots of $1/E$ vs $r$ for different values of $N$ and $\langle k\rangle$
are shown in Figs. \ref{fig:ERL} and \ref{fig:SFL} for Poisson and power-law networks
respectively.
In both cases, the networks display the small-world property \cite{WS}
(even smaller in the power-law case) with a slow (logarithmic)
increase with $N$ and a smooth decrease with $\langle k \rangle$ (see insets of
Figs. \ref{fig:ERL} and \ref{fig:SFL}).
\begin{figure}[t!]
\includegraphics[width=0.49\textwidth]{fig5a.pdf}
\includegraphics[width=0.49\textwidth]{fig5b.pdf}
\caption{\label{fig:ERL}
Mean distance $1/E$ as a function of $r$ for Poisson networks:
(a) $N=8000$ and different values
of $\langle k\rangle$ indicated on the figure.
The networks exhibit the small-world property.
(b) $\langle k\rangle=4.00 \pm 0.04$ and different number of nodes $N$ indicated on
the figure. The effects of $r$ on the mean path are significant only
for small $\langle k \rangle$ because of the fragmentation of the network.}
\end{figure}
\begin{figure}[t!]
\includegraphics[width=0.49\textwidth]{fig6a.pdf}
\includegraphics[width=0.49\textwidth]{fig6b.pdf}
\caption{\label{fig:SFL}
Mean distance $1/E$ as a function of $r$ as in
Fig.~\ref{fig:ERL} but for power-law network with $\gamma=4.0$.
In this case, paths are shorter than in the Poisson nets. }
\end{figure}
In the Poisson case,
the mean path does not depend on $r$ significantly when $\langle k \rangle$ is not too small ($\ge 6$),
as indicated by the relatively flat plots in Fig.~\ref{fig:ERL}(a).
Only for small $\langle k \rangle$ there are important effects for assortative correlations.
For instance, for $\langle k \rangle \simeq 4$ (Fig.~\ref{fig:ERL}(b)),
$1/E$ increases in about two units from $r\simeq 0$ to $r\simeq 1$, for all the analyzed sizes.
This effect is still larger for $\langle k \rangle=2$ where $L$ increases by a factor about two
in the same interval of $r$, as shown in Fig.~\ref{fig:ERL}(a) for $N=8000$.
In order to further interpret these results, we investigated the cluster structure of the resulting
rewired networks. We measured the size of the largest cluster (let us call it $N_1$),
the number $n$ of clusters, and the average size $S$ of the clusters different from the largest one.
The plots are presented in Fig.~\ref{fig:clusters} for $\langle k \rangle=2$ and 4.
For $\langle k \rangle=4$, the relative size of the largest cluster (circles) is about 0.98 for most of the
range of $r$, notice however that it slightly decays towards $|r|=1$
(which is more evident for $\langle k \rangle=2$).
As $\langle k \rangle$ increases, the number of fragments
rapidly decays and the average size $S$ (triangles) tends to unit,
meaning that only single nodes are disconnected
(recall also that $P(0)={\rm e}^{-\langle k \rangle}$).
Therefore the increase of the mean distance towards $|r|=1$, observed for low
$\langle k \rangle$ in Fig.~\ref{fig:ERL},
may simply reflect the fragmentation of the network.
Clearly, for high values of the assortativity,
the network tends to fragment into groups of vertices that have the same degree.
Moreover, for values of $\langle k\rangle$ approximately larger than 1,
the percolation analysis performed by Noh on Poisson networks \cite{noh} shows that
the size of the largest cluster is smaller for assortative networks than for dissortative and neutral ones.
Meanwhile, as $\langle k \rangle$ increases,
the fraction of vertices that do not belong
to the largest cluster becomes negligible, although more slowly the more assortative the network.
Therefore, in such large $\langle k \rangle$ limit,
the mean distance remains invariant under changes of $r$.
Hence, transport processes modeled in these networks may suffer important impact
when $r$ is large and $\langle k \rangle$ small.
The longer the typical separation between nodes, the slower the propagation.
\begin{figure}[h!]
\includegraphics[width=0.52\textwidth]{fig7.pdf}
\caption{\label{fig:clusters}
Clusters analysis. Plots of $N_1/N$ (where $N_1$ is the size of the largest cluster) [circles],
average size of finite size clusters, $S$ [squares], and number of clusters, $n$ [triangles],
as a function of $r$
for Poisson networks with $\langle k\rangle=2$ (open symbols) and 4 (filled symbols).
The fragmentation of the network observed for high values of the assortativity reflects
the grouping of vertices with the same degree.
The figure shows outcomes for $N=8000$. Outcomes for sizes $N=4000$, 8000 and 16000
all collapse into single curves (not shown).
}
\end{figure}
In power-law networks, for fixed $N$ and $\langle k\rangle>2$, there is an interval of $r$
where paths are shorter than in the Poisson nets (Fig.~\ref{fig:distances}),
and still shorter as $\gamma$ decreases (not shown).
Moreover $1/E$ becomes more sensitive to the coefficient $r$ (smile shape),
in the region where plots are flat for Poisson networks.
Notice also that minimal mean paths occur for slightly assortative correlations ($r \gtrsim 0$),
slowly increasing with $N$ (Fig.~\ref{fig:SFL}(b)).
The analysis of clusters for $\gamma=4.0$, shows that for $\langle k\rangle \ge 4$
there is a single cluster of size $N$, for all $r$.
Only for $\langle k\rangle=2.0$ we observed fragmentation with
$N_1/N\simeq 0.7-0.8$, $n/N\simeq 0.06$, $S\simeq 4$ for all $N>2000$ (plots not shown).
For the mean path $L$, we observed qualitatively
similar outcomes although shifted to slightly higher values, as illustrated in
Fig.~\ref{fig:distances}.
\begin{figure}[t!]
\includegraphics[width=0.49\textwidth]{fig8a.pdf}
\includegraphics[width=0.49\textwidth]{fig8b.pdf}
\caption{\label{fig:real}
$C$ and $1/E$ vs $r$, for real networks. Original values, before rewiring,
are also indicated (filled symbols). PGP (Pretty Good Privacy encrypted communication network)
\cite{PGP}: $N=10680$, $\langle k\rangle\simeq 4.55$; P2P (Gnutella peer-to-peer network)
\cite{P2P}: $N=10876$, $\langle k\rangle\simeq 7.35$; PGR (power grid) \cite{PGR},
$N=4941$, $\langle k\rangle\simeq 2.67$; APC (astrophysics collaboration)
\cite{APC}: $N=16706$, $\langle k\rangle\simeq 14.5$.
Besides the different details of real degree sequences,
we can interpret the main features of these nets in terms
of those observed for the Poisson and power-law classes.
}
\end{figure}
We also applied the rewiring procedure described in Sec. \ref{sec:ensembles} to
real degree sequences. Networks were symmetrized and edge weights were ignored.
In Fig.~\ref{fig:real} we depict the behavior of $C$ and $1/E$ vs
$r$, for
the PGP (encrypted communication network using Pretty Good Privacy encryption algorithm)
largest component \cite{PGP},
P2P (Gnutella peer-to-peer network) \cite{P2P},
PGR (electrical power grid of the western United States), example of small-world network \cite{PGR} and
APC (astrophysics collaboration network) \cite{APC}.
First notice that in all cases the clustering $C$ is much larger in real networks than in
the randomized ($r$-ensemble) ones, as already observed for other examples in Ref. \cite{foster}.
The mean distance is also typically larger in the real networks.
An exception is P2P network, characterized by a value of $1/E$ typical of the $r$-ensemble.
Rewired real networks display some of the typical behaviors observed for the artificial cases.
Let us make some remarks arising from comparisons.
(i) PGR (power grid) displays plots of $C$ vs $r$ and $1/E$ vs $r$ that are in good accord with
those observed for similar parameters $\langle k \rangle$ and $N$ of the Poisson case.
In fact its degree distribution decays exponentially.
(ii) P2P presents power-law decay of the degree distributions, for $k>10$, with exponent close to
$\gamma =4$. Both plots of P2P are in agreement with those obtained
for the $\gamma=4$ class with similar values of $N$ and $\langle k \rangle$, despite the
distributions only share in common the power-law tail.
(iii) PGP (of size similar to P2P) has a degree distribution that decays
with exponent $\gamma<3$ for $k<50$ and $\gamma\simeq 4$ beyond \cite{PGP}.
The plot for $1/E$ vs $r$ presents larger values of $1/E$ than P2P consistent with
its $\langle k \rangle$. However, the plot $C$ vs $r$ of PGP deviates
from the standard behavior, presenting larger values of $C$ that increase with $r$ in a single regime.
The interval of allowed values of $r$ is sensitive also to features other than the tails.
These deviations can be attributed to different initial power law regimes.
(iv) Finally, APC has a power-law decay with exponent $\gamma \simeq 1$ and exponential cut-off for
$k>50$ \cite{APC}. The low and constant plot of $1/E$ vs $r$ is expected for a network with large
$\langle k \rangle$, almost independently of the class of degree distribution. The large values
of $C$ are also consistent with heterogeneous distributions with large $\langle k \rangle$.
Then, despite the different details of real degree distributions, the main observed features
can be interpreted in terms of the analyzed classes with corresponding values of parameters
$\langle k \rangle$ and $N$.
\section{Discussion and final remarks}
\label{sec:final}
For all classes of networks considered, $C$ increases with $r$ in the whole allowed range of $r$.
This behavior has already been observed in Ref. \cite{foster},
where only positive values of $r$ were analyzed
and also in Ref. \cite{serrano2} although different definitions of clustering and correlation
were used.
However, we observed that, in the $r$-ensemble, a non-vanishing clustering coefficient $C$
is typically due to finite size effects, such that,
in the large size limit, network transitivity vanishes as $1/N$.
In contrast, for power-law networks characterized by $r$ above a threshold,
a significantly non-null transitivity arises, apparently persisting for large $N$.
In any case, since rewiring in the $r$-ensemble turns $C$ typically small,
transitivity does not
seem to contribute for attaining the prescribed value of $r$.
To identify the factors that contribute to $r$, it is useful to rewrite
Eq.~(\ref{def_r}).
Recalling that $\langle k^n\rangle_e=\langle k^{n+1}\rangle/\langle k\rangle$ \cite{mendes},
where $\langle \cdots \rangle$ (without subindex) means computed over the degree distribution $P(k)$,
then Eq.~(\ref{def_r}) becomes
\begin{equation} \label{r1}
r=\frac{ \langle k \rangle^2 \langle k k' \rangle_e -\langle k^2\rangle^2}
{\langle k\rangle \langle k^3\rangle -\langle k^2 \rangle^2} \,.
\end{equation}
Following the decomposition made by Estrada \cite{estrada}, notice that the quantity
$\tilde{P}_3\equiv\sum_{(k,k')}(k-1) (k'-1)$,
where the sum is performed over all the different pairs of neighboring vertices,
is the number of paths of length three, then $\tilde{P}_3=3 n_\triangle + P_3$,
where $P_3$ is the number of nontriangular paths of length three (involving four vertices).
As done in Eq.~(\ref{ntriangle}) for $3 n_\triangle$, let us scale also $P_3$ by the number of wedges
(paths of length two) $P_2=\frac{1}{2}\sum_i k_i(k_i-1)$, defining $B=P_3/P_2$ (scaled branching) \cite{estrada}.
Then Eq.~(\ref{r1}) can be written as
\begin{equation} \label{r2}
r=\frac{ \langle k \rangle (\langle k^2 \rangle-\langle k \rangle)
\left( B+1- \frac{\langle k^2\rangle}{\langle k\rangle} +C \right) }
{\langle k\rangle \langle k^3\rangle -\langle k^2 \rangle^2} \,.
\end{equation}
Expression (\ref{r2}) is determined by the three first raw moments of $P(k)$,
and also by $B$ and $C$ that are the quantities embodying the information on the linear degree-degree
correlations.
For the Poisson distributed networks, by taking into account the analytical expressions for the moments of $P(k)$,
it is straightforward to see that
\begin{equation} \label{rpoisson}
r= B - \langle k \rangle + C \,.
\end{equation}
Clearly, disassortative correlations are favored by vanishing $C$.
Only for positive $r$ the growth of $C$ is convenient, but
$B-\langle k \rangle$ can vary in a wider interval than $C$ (twice wider in this case).
The existence of two regimes in the increase of $C(r)$, observed in Fig.~\ref{fig:ERC}, is consistent with this picture.
In other words, Eq.~(\ref{rpoisson}) indicates that, in the rewiring process of a Poisson network
to attain $r_\star$,
as soon as $P(k)$ is conserved and $C$ remains very small,
then $r$ is ruled predominantly by $B$.
For the power-law distributed networks, some qualitatively similar effects occur, as far as the
relation of $r$ with $B$ and $C$ is always linear and $C$ is constrained to a narrower interval than
$B$.
The formation of triangles also in this case contributes only for assortative values of $r$ (above the crossover),
with values of $C$ larger than in the Poisson case but still small.
Then also in this case the increase of the branching must drive rewiring.
In contrast to the Poisson case, the other terms in Eq. (\ref{r2}),
related to the moments of $P(k)$, might have a crucial influence on $r$ because
of the divergence, in the infinite network limit, of the $n$th moments for $\gamma\ge n+1$.
Let us analyze the large $k_{max}$ (hence $N$) limit,
setting aside the marginal (logarithmic) cases.
Here, we use the result $k_{max}\sim N$. However, if
$k_c \sim N^{1/(\gamma-1)}$ were used instead, the conclusions would remain the same.
Considering the expressions for the moments (e.g., Eq. (\ref{k1})),
one has, for $3<\gamma<4$: $r \sim [B-{\cal O}(1)]/{\cal O}(N^{ 4-\gamma })$,
while for $2<\gamma<3$: $r \sim [B-{\cal O}(N^{ 3-\gamma })]/{\cal O}(N )$,
meaning ${\cal O}(x^\alpha) \sim a x^\alpha$, with $a>0$.
To approach the lower limit $r=-1$, one must have minimal $B$.
If it is of order greater than the other terms in the numerator, then one cannot have negative $r$,
because $B$ is non-negative and it will dominate the numerator.
Thus, negative $r$ can arise only if $B$ is of the same or lower order.
But in that case $r\to 0$ in the large $N$ limit.
This explains why the lower bound $r_{min}$ tends to 0 when $\gamma\le4$
(see Fig.~\ref{fig:rlim}(b)).
Along this line, however, $r_{min}$ is not expected to vanish when $\gamma>4$,
but to tend to a small finite value.
Similarly, to attain a non-null upper bound of $r$, $B$ needs to grow like the denominator,
otherwise, the upper bound will be negative and also vanish when $N\to \infty$,
leading to the collapse of the upper bound too. However, this does not necessarily happen
if $B$ is driven to grow enough during rewiring, which is what seems to happen according to
Fig.~\ref{fig:rlim}(b).
The connection between $r$ and distance measures is not so direct analytically.
Numerical results showed that, for networks with localized distribution of links,
changing $r$ modifies significantly the mean path length only
when correlations are assortative ($r>0.5$) and $\langle k\rangle$ small.
These changes could be related to the induced fragmentation, that diminishes by increasing $\langle k\rangle$.
Then, the impact of $r$ becomes less important as $\langle k\rangle$ increases.
Meanwhile, the influence on the diameter is more dramatic.
In power-law networks, the modification of the mean path length by $r$
is a bit more marked even if
fragmentation is absent for $\langle k \rangle \ge 4$, while the diameter is not largely affected.
%
In both cases, the modification of characteristic lengths
that occur when varying $r$ may affect transport processes
and should be taken into account either when interpreting or designing
numerical experiments on top of these networks.
\section*{Acknowledgements:}
We acknowledge partial financial support from Brazilian Agency CNPq.
The authors are grateful to professor Thadeu Penna for having provided
the computational resources of the Group of Complex Systems of the
Universidade Federal Fluminense, Brazil, where some of
the simulations were performed.
\bibliographystyle{unsrt}
|
{
"timestamp": "2013-04-09T02:05:02",
"yymm": "1206",
"arxiv_id": "1206.6266",
"language": "en",
"url": "https://arxiv.org/abs/1206.6266"
}
|
\section{Introduction}
In one-spatial dimension any weak disorder is believed to have
the potentiality of converting a good metal to an insulator,
as a consequence of the Anderson localization.\cite{imry}
However, the existence of two counter examples for this common belief
has been pointed out recently, both being found in a carbon nanostructure,
exhibiting a perfectly conducting channel (PCC).
The two examples are
i) the metallic carbon nanotube (CNT),\cite{ando1}\cdash\cite{suzuura}
and ii) the zigzag graphene nanoribbon (GNR) with edge modes of
partially flat dispersion.\cite{wakabayashi1}\cdash\cite{wakabayashi3}
A PCC is immune to backward scattering;
its existence allowing the conductance of the system to remain {\it finite}
even when its length $L$ becomes infinitely long, indicating
the {\it absence} of Anderson localization.
Note also that both CNT and GNR can be regarded as a derivative form of
an infinitely large graphene sheet
possessing two energy valleys around its Dirac points $K$ and $K'$.
Since scattering between these two valleys,
i.e., the inter-valley scattering,
usually destroys the perfectly conducting channel,
we focus on the case in which the system is subject to
only long-ranged scatterers, i.e., impurities whose potential range is
larger than or comparable to the size of the unit cell.
This paper highlights the behavior of such a PCC
believed to be existent in the carbon nanostructures.
Since a PCC appears within a quantum-mechanical framework,
one may think that it is fragile against a loss of the phase coherence
due to inter-electronic Coulombic interaction, electron-phonon coupling, etc.
This naive speculation, however, turns out to be not necessarily the case,
as we further elaborate the description of this phenomenon below.
We have performed extensive numerical study of such carbon-based
disordered quasi-one-dimensional systems
using the standard tight-binding representation of
the graphene's honeycomb lattice structure (see Fig.~\ref{zigzag}).
Our treatment of the dephasing follows that of Ref.~\refcite{suzuura}.
\begin{figure}[tpb]
\centerline{\psfig{file=GNR_zz.eps,width=6.0cm}}
\vspace*{8pt}
\caption{Real space image of a GNR consisting of $M$ zigzag lines.
A CNT consisting of the same number of zigzag lines can be obtained by
rolling up this GNR and linking each site of the first zigzag line
with its partner on the $M$th row.
\label{zigzag}}
\end{figure}
\section{Perfectly conducting channels in GNR and CNT}
In the case of GNR with zigzag edge boundaries,
the existence of a PCC originates from its peculiar band structure.
Indeed, one can give it a simple interpretation based on
the appearance of partially flat-band edge modes.\cite{fujita}
Since these flat bands appear only in a part of the one-dimensional
Brillouin zone connecting the two valleys,
if one counts the number of conducting channels
of each propagating direction at a given Fermi energy,
there always exist an excess right-going channel in one valley
and an excess left-going channel in the other valley.
Let $N_{c}$ be the number of conducting channels in each valley
in the absence of the edge modes.
The above fact indicates that the number of right-going (left-going)
channels is $N_{c}$ ($N_{c}+1$) in one valley
and $N_{c}+1$ ($N_{c}$) in the other valley.
This imbalance leads to the appearance of one PCC which is robust against
disorder,\cite{barnes1,hirose} resulting in a noteworthy statement
on the scaling of the dimensionless conductance $g(L)$,
i.e., ``$g(L)$ scales naturally to a smaller value as the length $L$ of
the disordered region increases, but in the large-$L$ limit,
$g(L)$ remains finite, as $\lim_{L\rightarrow\infty}
g(L) = 1$''.\cite{wakabayashi1}\cdash\cite{wakabayashi3}
Interested readers may refer to Ref.~\refcite{takane_incoherent}
and references therein for more detailed discussion
on the transport characteristics of such a system
with an imbalance in the number of right- and left-going channels.
In a recent paper,\cite{takane_incoherent} one of the authors has shown
that this PCC still survives even in the incoherent regime,
where information on the phase of the electronic wave function
is essentially lost.
This unexpected robustness of the PCC in GNRs in the presence of dephasing
(see also Fig. \ref{plot-g}) stems most certainly from the fact
that the imbalance in the number of conducting channels is not a consequence
of a particular symmetry
(cf. role of the so-called ``pseudo-time reversal'' symmetry
in the CNT case, see the discussion below);
it is simply guaranteed by the existence of
partially flat-band edge modes.
\begin{figure}[btp]
\centerline{\psfig{file=plot-g.eps,width=8.0cm}}
\vspace*{8pt}
\caption{Conductance of a disordered graphene nanoribbon:
a linear plot of the dimensionless conductance $\langle g \rangle$
(main panel), and a semi-log plot of $\langle g \rangle -1$ (inset)
as a function of the length $L$ of the disordered region
measured in units of the lattice constant $a$.
Solid lines (filled dots) correspond to the case without (with) dephasing.
We set $M=30$ and $\epsilon_F/t = 0.579$ for which
the total number of conducting channels is $11$
(i.e., $g = 11$ at $L/a \rightarrow 0$).
Other parameters are $W/t = 0.13$, $p = 0.1$ and $L_{\phi}/a = 500$,
where $W$ measures the strength of each scatterer, and
$p$ is the probability that each site is occupied by such a scatterer.
The ensemble average is performed over $10^4$ samples with different
impurity configurations.
The magnitude of the error bar at $L/a = 15000$ is of order $10^{-3}$.
\label{plot-g}}
\end{figure}
In contrast to the case of GNR, the existence of a PCC in CNTs is
a much more subtle issue.
It is certainly essential that the system belongs to
the symplectic symmetry class, i.e.,
the total Hamiltonian of the system inclusive of the random potential
must, not only be time-reversal symmetric (TRS), but also fall on the case of
$\Theta^2=-1$ with $\Theta$ being the time-reversal operator.
This is typically the case with an effective spin-$1/2$ system
of a Dirac-type conic dispersion relation
though in this case TRS is not a real one (often dubbed as ``pseudo-TRS'').
This condition, therefore, will be safely satisfied in CNTs
under the influence of long-ranged potential disorder.
However, this condition alone turns out to be still not a sufficient one
for ensuring the existence of a PCC.
Much work on this subtlety,
associated with the parity of the number $N_c$ of the conducting channels
in each single Dirac cone,
has been pursued by Ando and co-workers in the context of studying
the transport characteristics of CNTs at a very early stage in the development
of this field.\cite{ando1}\cdash\cite{nakanishi}
To the best of our knowledge, a clear statement on the condition for
the appearance of a PCC, i.e., the idea that {\it both} of the following
two conditions must be satisfied:
i) appurtenance to the symplectic symmetry class, and
ii) oddness of the number of conducting channels,
first appeared in Ref.~\refcite{suzuura}.
Notice that in the band structure of metallic CNTs,
only the single lowest gapless subband (of quasi-linear dispersion)
is non-degenerate,
whereas other quadratic subbands are all two-fold degenerate.
Therefore, wherever the Fermi level $\epsilon_F$ is,
the number of conducting channels in each propagating direction
in a given valley is necessarily odd.
This ensures the existence of at least one PCC per valley
(cf. Fig.~\ref{plot-c}).
Clearly, the dimensionless conductance $g$ ($=2 N_c$ in the clean limit;
here the factor 2 comes from the two valleys) decreases as disorder increases,
but remains finite due to the appearance of two PCCs.
This can be rephrased as follows:
``For a fixed strength of disorder, $g$ scales down to a smaller value
as the system becomes longer (as $L$ increases), but it approaches
asymptotically to an integral value, which is 2, in the long-$L$ limit''.
Such a behavior of the so-called ``symplectic-odd symmetry class'' has been
more profoundly elucidated
by the subsequent studies\cite{takane_DMPK}\cdash\cite{sakai}
in the context of the DMPK equation and the supersymmetric field
theory.\footnote{It seems fair to mention that a similar idea
but in a different context has already appeared
in an earlier work of Zirnbauer and co-workers.\cite{zirnbauer,mirlin}}
The existence of PCC in CNTs relies on the presence of pseudo-TRS.
Therefore, it could be fragile against any disturbances that might cause
breaking of the pseudo-TRS,
e.g., against trigonal warping of the Dirac cone.\cite{akimoto}
It is, therefore, natural to presume that PCC might be
fragile against dephasing.\cite{suzuura}
In this paper, we have extended this consideration
on the role of dephasing in the robustness of PCC in CNTs,
primarily for the comparison with the GNR case,
but with much care to the dependence on the circumference $R$
of the nanotube.\footnote{The larger the circumference $R$ is,
the more closely are the subbands spaced.
Also, the further one goes away from the Dirac point,
the stronger the trigonal warping becomes in the spectrum of a CNT.
Combining these two observations, one immediately realizes that
for a fixed value of $\epsilon_F$ and a given number of $N_c$,
the warping effects become stronger with decreasing $R$,
leading to stronger pseudo-TRS breaking.}
\begin{figure}[bth]
\centerline{\psfig{file=plot-c.eps,width=7.5cm}}
\vspace*{8pt}
\caption{Conductance of a disordered metallic carbon nanotube
as a function of the length $L$ of the disordered region
measured in units of the lattice constant $a$.
Solid; broken; dotted lines (filled circles; triangles; squares) correspond
to the case without (with) dephasing,
and of a different diameter of the nanotube:
$M=150$; $M=100$; $M=50$.
The three cases are also represented by different colors: red; blue; gray.
We set $\epsilon_{F}/t = 0.042$, $0.06309$, and $0.12623$, respectively
to the above three cases so that the initial value of $\langle g \rangle$
always takes the same value: $\langle g \rangle_{L \rightarrow 0} = 2 N_c = 6$.
Other parameters are set as
$W/t = 0.3$ and $p = 0.1$ and $L_{\phi}/a = 50$.
The ensemble average is performed over $5000$ samples.
The magnitude of the error bar at $L/a = 500$ is of order $10^{-3}$.
\label{plot-c}}
\end{figure}
\section{Sketch of the numerical analysis and its implications}
Let us consider again the case of a GNR with $M$ zigzag lines
as shown in Fig.~\ref{zigzag}.
The electronic states in this nanostructure
are described by a tight-binding Hamiltonian,
\begin{equation}
\label{ham}
H = - \sum_{i,j} \gamma_{i,j}|i\rangle \langle j|
+ \sum_{i}V_{i}|i\rangle \langle i| ,
\end{equation}
where $|i\rangle$ and $V_{i}$ represent the localized electron state
and the impurity potential, respectively, on site $i$,
and $\gamma_{i,j}$ is the transfer integral between sites $i$ and $j$
with $\gamma_{i,j} = t$ if $i$ and $j$ are nearest neighbors
and $\gamma_{i,j} = 0$ otherwise.
We assume that the zigzag lines are infinitely long.
Instead, we distribute impurities (randomly) only in a finite region
(the disordered region, composed of $N$ columns)
of this infinitely long ribbon.
What we have been calling the ``system's length $L$'' so far is
now identified as the length $N$ of this disordered region, i.e., $L/a = N$.
In the actual computation, we have numerically estimated
the dimensionless conductance $g(L)$
using the Landauer formula and recursive Green's function method.
We assume that the potential profile of the scatterers is gaussian with
its characteristic range $d$ chosen to be $d/a = 1.5$,
a value large enough for avoiding the inter-valley scattering.
We then let the amplitude of this gaussian random potential $w$
be uniformly (randomly) distributed within the range of $|w| \le W/2$.
As we mentioned earlier, the effects of dephasing have been taken into account
by the approach employed also in Ref.~\refcite{suzuura},
i.e., by separating the entire sample into several segments of
equal length $L_{\phi}$.\footnote{The rule of this game is the following:
each time the incident electron leaves a segment and enters the next
one, he loses his phase memory.
As for concrete implementation of this to realistic carbon nanostructures,
we refer interested readers to our forthcoming publication.}
Let us now look at Fig.~\ref{plot-g}.
The main panel shows a linear plot of $\langle g \rangle$,
indicating that $\langle g \rangle$ converges to unity
irrespective of the presence or absence of dephasing;
a clear signature of the appearance of a PCC.
This partly confirms numerically our earlier prediction
based on a Boltzmann equation approach, stating that
``the PCC in a GNR is so robust that it may possibly survive
even into the incoherent regime.''\cite{takane_incoherent}
In our plots one can also observe that $\langle g \rangle$ in the presence
of dephasing is slightly larger than the case of no dephasing.
This feature is more clearly highlighted in the semilog plot
of $\langle g\rangle - 1$.
When $L/a \gtrsim 10^4$ (i.e., $L$ is very large),
the value of $\langle g\rangle - 1$ without dephasing scales away
from a quasi-linear (stable) behavior in the presence of dephasing.
This is probably due to
residual inter-valley scattering.
Notice that here dephasing plays indeed the role of
{\it stabilizing the PCC} against weak inter-valley scattering.
Let us finally analyze our numerical data for CNTs (Fig. \ref{plot-c}).
We make a few remarks on our CNT data,
which show a number of contrasting features to the case of GNR.
First, the value of $\langle g \rangle$ is {\it smaller}
in the presence of dephasing than in the absence of dephasing,
which is consistent with the result of Ref.~\refcite{suzuura}.
This simply opposes the GNR case.
In some cases ($M = 50$ and $100$) $\langle g \rangle$ decreases
even below the ``protected'' value of 2 as $L/a$ increases.
As mentioned earlier, trigonal warping of the Dirac cone is omnipresent
whenever the Fermi level is away from the Dirac point,
and this can possibly come into play in the transport characteristics
of a CNT,\cite{akimoto} when its diameter or $M$ is not large enough.
This seemingly weak effect associated with the breaking of pseudo-TRS
is shown to give a destructive influence on the scaling behavior
of $\langle g \rangle$ in the large-$L/a$ limit.
Dephasing does not help.
These observations lead us to our second conclusion that
a stable existence of the two PCCs in a CNT is restricted
to the case of a very {\it large diameter}
and of a relatively {\it small doping}.
\section*{Acknowledgments}
This work was supported in part by a Grant-in-Aid for Scientific
Research (C) (No. 21540389)
from the Japan Society for the Promotion of Science,
and by the National Science Foundation under Grant No. NSF PHY05-51164.
|
{
"timestamp": "2012-06-28T02:03:11",
"yymm": "1206",
"arxiv_id": "1206.6234",
"language": "en",
"url": "https://arxiv.org/abs/1206.6234"
}
|
\section*{Introduction}
A spherical object $e$ in an exact linear triangulated category $(T,[\ ])$ is one such
that $\dim\operatorname{Hom}(e,e[i])=\dim H^i(S^d,\mathbb{R})$, the Betti numbers of a
$d$-dimensional sphere for some fixed $d$. This concept is especially
useful when $T=D^b(X)$ for some
$d$-dimensional Calabi-Yau variety $X$ or when $T$ is a $d$-Calabi-Yau
category. This is because such objects have, in a suitable sense, the
fewest possible derived self-maps. There has been a great deal of
interest in them in recent years as they hold the key to understanding
the categorical structure of $T$ and its automorphism group $\operatorname{Aut}(T)$. For
example, it is conjectured that they give rise to a generating set for
$\operatorname{Aut}(T)$ in the case when $T=D(X)$ for a K3 surface $X$.
The spherical objects also play a central role in our understanding of
Bridgeland stability conditions for some surfaces (see \cite{BriK3})
essentially because of the central role they play in the derived
category of the surface. It is likely that they will play a similarly
crucial role in our understanding of stability conditions for higher
dimensional Calabi-Yau varieties.
In an important paper, \cite{SeidelThomas}, Seidel and Thomas show
that certain series of spherical objects give rise to actions of the
braid group on the derived category. This is done by associating
an equivalence $\Phi_a$ of the derived category to each spherical object $a$ (known
as a spherical twist). In the K-theory of the derived category, these
are reflections and so they are sometimes called reflection
functors. It was observed in that paper that when two spherical
objects $a$ and $b$ are completely orthogonal (in other words,
$\operatorname{Hom}(a,b[i])=0$ for all integers $i$) then the associated spherical
twists commute. This is because
$\Phi_a\circ\Phi_b\cong \Phi_{\Phi_b(a)}\circ\Phi_b$ (see \cite[Lemma
2.11]{SeidelThomas}) and $\Phi_b(a)\cong a$ as can be checked by direct and
easy computation (see \cite[Proposition 2.12]{SeidelThomas}). Our
first result in this note is to show that the converse also holds: if
two spherical twists commute then either they are equal or the
associated spherical objects are (completely) orthogonal.
We then turn our attention to the special case where the spherical
objects are actually vector bundles. This is an important class of
examples. The associated spherical twists have Fourier-Mukai kernel
given by a sheaf parametrizing properly-torsion free sheaves whose
singularity set is a single point of $X$. Our second main result is to
show that this also has a converse: if $\Phi$ is an exact equivalence of
the derived categories of Calabi-Yau $d$-folds such that its
Fourier-Mukai kernel is a sheaf parametrizing properly-torsion free sheaves whose
singularity set is zero dimensional then it must be a composite of
commuting spherical twists. The difficulty in this is to show that the
double dual of the kernel (which must be locally-free by assumption)
can be reduced essentially to a sum of (completely) orthogonal
spherical bundles. To establish this we need to generalise the
computations of $\operatorname{Ext}$ groups given by Mukai (\cite{MukTata}) and
which are so crucial in describing stability conditions for surfaces.
\section{Fourier-Mukai Transforms}\label{s:FMT}
In this paper we shall take a Fourier-Mukai transform (or FM transform
for short) to be an equivalence of categories of the (bounded) derived
category of sheaves $D(X)$ and $D(Y)$ on smooth (complex) projective
varieties $X$ and $Y$ given by correspondences of the form
$\Phi_F:E\mapsto Ry_*(x^*E\otimes F)$, where $F$ is a sheaf on
$X\times Y$ called the \textbf{kernel} of the transform. These are
discussed in \cite{HuyBook} and \cite{BBHBook}.
Recall that we say that a family of sheaves $\mathcal{M}$ is
\textbf{strongly simple} if it consists of simple sheaves and
if $\operatorname{Ext}^i(E,E')=0$ for all $i$ and $E\neq E'$ in $\mathcal{M}$.
(see \cite{BrEquiv}):
\begin{thm}[Bridgeland]\label{t:TB} The kernel $F$ gives rise
to a FM transform if and only if the restrictions of $F$ to $X$ form a
strongly simple family and $F_x\otimes K_X\cong F_x$ for all $F_x$ in
the family, where $K_X$ is the canonical bundle of $X$.
\end{thm}
The last condition is vacuous for Calabi-Yau $d$-folds.
The theorem gives us an easy way to recognise when a family of sheaves gives
rise to an FM transform.
We aim to study a special class of Fourier-Mukai Transforms which
arise from so called spherical bundles. These were first studied by
Mukai (\cite{MukTata}) in the case where $X$ is a K3 surface.
Notation: we let $E^{\vee}=R\operatorname{\mathcal{H}\mathit{om}}(E,\mathcal{O}_X)$ denote the derived dual
of an object $E$ of $D(X)$.
\section{Commuting Spherical Objects}\label{s:except}
Throughout this section we assume that $X$ is a smooth Calabi-Yau variety of
dimension $d$.
\begin{defn} An object $E$ of $D(X)$ is \textbf{exceptional} if
$\operatorname{Ext}^i(E,E)$ is as small as possible (the precise definition depends
on $X$ but we will not need to be very definite in what follows). We say that $E$ is
\textbf{spherical} if
$\dim\operatorname{Ext}^i(E,E)=1$ for $i=0$ or $i=d$ and is zero otherwise. We
say that $E$ is \textbf{rigid} if just $\operatorname{Ext}^1(E,E)=0$.
\end{defn}
Note that a simple rigid sheaf on a Calabi-Yau 2 or 3-fold is automatically
exceptional and spherical by Serre duality.
To any vector bundle $E$ we can associate a canonical (surjective) map
$E\otimes\operatorname{Hom}(E,\mathcal{O}_x)\to\mathcal{O}_x$ given by evaluation. We shall denote
the domain of such maps by $E_H$ for short and the kernel by
$E_x$. This extends to a map for any object $E$ of $D(X)$. We shall
denote a choice of cone on such a map by $F_x$. Then when $E$ is a
bundle, $E_x=F_x[1]$.
In a ground breaking paper by Seidel and Thomas \cite{SeidelThomas} it is shown
(in somewhat greater generality) that when $E$ is a spherical object
in $D(X)$,
the family of $F_x$ give rise to a Fourier-Mukai transform $D(X)\to
D(X)$, denoted $\Phi_E$ or, more usually, $T_E$ (the \textbf{spherical
twist} associated to $E$).
The kernel
of the transform is given by the shift by $-1$ of the cone on the canonical map
$\operatorname{R\mathcal{H}\mathit{om}}(\pi_1^* E,\pi_2^*E)\to \mathcal{O}_\Delta$
given by adjunction from the composite map
\[\xy\xymatrix@C=7ex{\pi_2^*E\ar[r]^/-8pt/{\pi_2^*E\otimes
\rho}& {\pi_2^*E\otimes\mathcal{O}_\Delta}\ar[r]^{\sim}&
{\pi_1^*E \mathbin{\smash{\buildrel L\over\otimes}} \mathcal{O}_\Delta}}\endxy\]
where $\pi_i:X\times X\to X$ are the two projection maps and
$\rho:\mathcal{O}_{X\times X}\to\mathcal{O}_\Delta$ is the canonical restriction
map. We shall denote the functor $\operatorname{R\mathcal{H}\mathit{om}}(\pi_1^*(E\mathbin{{\buildrel L\over\otimes}}
{-}),\pi_2^*E)$ by $\Psi_E$. So for all $G\in D(X)$ we have a
triangle
\[\Phi_E(G)\to\Psi_E(G)\to G\]
which is natural in $G$ (rather unusually for triangles of functors).
Their proof that these do give
Fourier-Mukai transforms is fairly direct although a somewhat
more elegant proof was later given by Ploog (\cite{PloogThesis}) using a clever
choice of spanning class (see \cite{HuyBook} for further details). In this
paper, we shall give yet another less elegant but more elementary
proof in the spirit of Mukai's original paper (\cite{MukTata}).
The main point of the \cite{SeidelThomas} paper was to show that certain
families of spherical objects give rise to a representation of the
Braid group on the derived category. As a corollary of the key
computational result they also show that if $E$ and $F$ are two
spherical objects such that $\operatorname{Hom}(E,F[i])=0$ for all $i$ then their FM
transforms commute. We can generalise this a little as follows.
\begin{defn}
We call a finite collection $E_i$, $1\leq i\leq n$ of
objects of $D(X)$ \textbf{strongly spherical} if
\begin{equation}
\dim\operatorname{Hom}(E_i,E_j[k])=\begin{cases} 1& \text{if }i=j\text{ and
(}k=0\text{ or }k=d\text{)}\\
0& \text{otherwise}\end{cases}
\end{equation}
In other words, each of the numbers $\dim\operatorname{Hom}(E_i,E_j[k])$ are
as small as possible.
\end{defn}
Then for a strongly spherical collection $\Gamma=\{E_i\}_{i=1}^n$ we have a
finite cone (in the sense of limits) $E_i\boxtimes
E_i^{\vee}\to\mathcal{O}_\Delta$. This has a limit (up to shift) constructed explicitly as
the cone on $\bigoplus_{i=1}^n E_i\boxtimes
E_{i}^{\vee}\to\mathcal{O}_\Delta$. Denote the limit by $E_{1,2,\ldots,n}$ and
its associated integral transform by $\Phi_\Gamma$. Then the
following is an easy exercise
\begin{prop}[\cite{SeidelThomas}] For any strongly spherical
collection $\Gamma$ of objects on a
Calabi-Yau $d$-fold,
$\Phi_\Gamma=\Phi_{E_1}\raise2pt\hbox{$\scriptscriptstyle\circ$}\Phi_{E_{2}}\raise2pt\hbox{$\scriptscriptstyle\circ$}\cdots\raise2pt\hbox{$\scriptscriptstyle\circ$}\Phi_{E_{n}}$
\end{prop}
In fact, there is a converse:
\begin{thm} \label{t:commute}
Suppose $E$ and $F$ are two spherical objects
in $D(X)$ such that $\Phi_E$ and $\Phi_F$ are distinct.
Then $\Phi_E\raise2pt\hbox{$\scriptscriptstyle\circ$}\Phi_F\cong\Phi_F\raise2pt\hbox{$\scriptscriptstyle\circ$}\Phi_E$ implies that $F\in E^\perp$.
\end{thm}
Before proving this we prove a technical lemma first proposed by David
Ploog in his thesis (\cite[Question 1.23]{PloogThesis}). We let
$\langle E\rangle$ denote the smallest triangulated category
containing $E$ in $D(X)$. This means that each object has a filtration
whose factors are all shifts of isomorphic copies of $E$.
\begin{lemma}\label{l:convploog}
Suppose $E$ is a spherical object of $D(X)$ and $d=\dim X\geq 2$. Then, for any object
$G\in D(X)$, $\Phi_E(G)=G[-d]$ if and only if $G\in\langle E\rangle$.
\end{lemma}
\begin{proof}
Recall that $G\in E^\perp$ if and only if $\Phi_E(G)=G$
(see \cite[Lemma 1.22]{PloogThesis}).
The reverse implication of our lemma was also proved in \cite[Lemma 1.22]{PloogThesis}. So suppose
$\Phi_E(G)=G[-d]$. Define
\[d_E(G)=\sum_{i=-\infty}^{\infty}\dim\operatorname{Hom}(E,G[i]).\]
We induct on $d_E(G)$. If $d_E(G)=0$ then $G\in E^\perp$ and
so $\Phi_E(G)=G$ and hence $G=0$. If $d_E(G)=1$ (wlog $\operatorname{Hom}(E,G)\neq0$) then $G[-d]$ fits in a
triangle
\[G[-d]\buildrel f^\vee\over\longrightarrow E\buildrel f\over\longrightarrow G,\]
where the unique maps (up to scale) are Serre dual to each other. But
then $f\raise2pt\hbox{$\scriptscriptstyle\circ$} f^\vee:G[-d]\to G$ must be Serre dual to the identity
$G\to G$ and so cannot vanish. But $f\raise2pt\hbox{$\scriptscriptstyle\circ$} f^\vee=0$ as the composite
of two consecutive maps of a triangle must always vanish. The
contradiction shows that $d_E(G)$ cannot equal $1$. Now assume that for
all $n<d_E(G)$ we know that if $d_E(G')=n$ and $\Phi_E(G')=G'[-d]$ then
$G'\in\langle E\rangle$. Pick any $f\in\bigoplus\operatorname{Hom}(E,G[i])$ and again
without loss of generality assume $i=0$. Let $C$ be a cone on $f:E\to
G$. Then $\Phi_E(C)=C[-d]$ because $\Phi_E(f)=f[-d]$. But we also
have that $d_E(C)=d_E(G)-2$ by applying $\operatorname{Hom}(E,-)$ to the triangle
defining $C$ and because $\dim X>1$. Then by induction $d_E(G)$ must be
even and $C\in\langle E\rangle$. Hence, $G\in\langle E\rangle$ as it
is an extension of $C$ by $E$.
\end{proof}
\begin{remark}
We can extract a bit more from the proof by observing
that it shows that if $G\in\langle E\rangle$ has $d_E(G)=2$ then $G\cong E[i]$ for
some integer $i$. In fact, we can go further to observe that
$d_E(G)/2$ is the length of a filtration of $G\in\langle E\rangle$
with factors given by shifts of $E$ (always under the
assumption that $d>1$). It follows that the length of such a
filtration is well defined as a function of $G$.
We shall use this in the following way: if $F\in\langle E\rangle$ is
spherical then applying
$F[i]\to$ to the triangle $F[-d]\to \Psi_E(F)\to F$ implies that
$d_E(F)=2$ and so $F\cong E[i]$ for some integer $i$.
\end{remark}
\begin{lemma}\label{l:swap}
Suppose $E$ and $F$ are two spherical objects such that $\Phi_E$ and
$\Phi_F$ commute. Then $G\in\langle E\rangle$ if and only if
$\Phi_F(G)\in\langle E\rangle$.
\end{lemma}
\begin{proof}
For any $G\in\langle E\rangle$ we have
\[\Phi_E(\Phi_F(G))\cong\Phi_F(\Phi_E(G))\cong\Phi_F(G[-d]).\]
So $\Phi_F(G)\in\langle E\rangle$ by Lemma \ref{l:convploog}. Applying this to
$G=\Phi^{-1}_F(G')$ gives us the converse as well.
\end{proof}
\begin{proof}[Proof of Theorem \ref{t:commute}.]
Assume that $\Phi_E$ and $\Phi_F$ commute and suppose that $E$
and $F$ are not orthogonal.
Then $\Phi_E(F)\in\langle F\rangle$ by Lemma \ref{l:swap}. But
$\Phi_E(F)$ is spherical and so by the remark above, $\Phi_E(F)=F[i]$
for some $i$. By assumption, we have
a non-zero map $E\to F$ (replacing $F$ by a suitable shift if necessary).
Applying the composite functor $\Phi^n_E[nd]$, for any positive integer
$n$ to this gives a non-zero map
$E\to F[n(i+d)]$. But $D(X)$ has bounded cohomology and
exts and so $i=-d$.
So $\Phi_E(F)\in\langle E\rangle$ by Lemma \ref{l:convploog} again. Then
$F\in\langle E\rangle$ by Lemma \ref{l:swap}. By the remark,
$F=E[i]$ for some $i$ and that implies that $\Phi_E=\Phi_F$
contradicting our assumption.
\end{proof}
\section{Spherical Bundles}
We shall now restrict our attention to the case of spherical
bundles on complex Calabi-Yau $d$-folds. We shall see that this case can be tackled more directly in
the spirit of Mukai's paper.
We first assume that $E$ is a simple rigid bundle and
consider the double
exact complex associated to the bi-functor $\operatorname{Ext}^*(-,-)$ applied to
the short exact sequence\footnote{The
reader is urged to write
a large part of this double complex out on a large piece of paper
before proceeding!}
\begin{center}$0\to E_x\to E_H\to\mathcal{O}_x\to0.$
\end{center}
Using the fact that $\operatorname{Ext}^i(E_H,\mathcal{O}_x)=0$ for all $i>0$ and
$\operatorname{Ext}^i(\mathcal{O}_x,E_H)$ vanishes for all $i<d$, we have
$\dim\operatorname{Ext}^1(\mathcal{O}_x,F)=1$, $\dim\operatorname{Ext}^1(F,\mathcal{O}_x)=\operatorname{rk}(E)^2-1+d$,
$\dim\operatorname{Hom}(F,E_H)=\dim\operatorname{Hom}(E_H,E_H)=\operatorname{rk}(E)^2$ and,
crucially, $\operatorname{Ext}^1(F,E_H)=0$ (using the fact that $d>2$ for this: the
case $d=2$ is much simpler and is left to the reader).
From this we have
\[\dim\operatorname{Ext}^1(F,F)=d-1+\dim\operatorname{Hom}(F,F)\]
Since $\operatorname{Ext}^1(E_H,\mathcal{O}_x)=0$, we have that the map
\[\operatorname{Ext}^2(\mathcal{O}_x,F)\to\operatorname{Ext}^2(E,F)\]
vanishes and so
$\operatorname{Ext}^2(F,F)\to\operatorname{Ext}^2(\mathcal{O}_x,F)$ surjects.
The map \[\operatorname{Hom}(F,F)\to\operatorname{Ext}^1(\mathcal{O}_x,F)\cong\mathbf{C}\] is the boundary map and
must be non-zero as the identity map is contained in the
domain. Hence, this map also surjects. We can conclude
\[\dim\operatorname{Hom}(F,F)=\dim\operatorname{Hom}(E_H,F)+1.\]
The following result is a stronger version of \cite{MukTata}, Prop 3.9.
\begin{lemma} The map $\operatorname{Hom}(E_H,E_H)\to\operatorname{Hom}(E_H,\mathcal{O}_x)$ injects.
\end{lemma}
\begin{proof}
Consider a map $f:E_H\to E_H$. If we fix a basis for $\operatorname{Hom}(E,\mathcal{O}_x)$,
then $f$ is given by an $r\times r$ matrix with scalar entries (since
$E$ is simple). The image of $f$ is given by a subspace $V$ of
$\operatorname{Hom}(E,\mathcal{O}_x)$ and $f$ is zero if and only if this subspace is
zero. But if it is not zero then the image of $E\otimes V$ in $\mathcal{O}_x$
is non-zero and so the image of $f$ in $\operatorname{Hom}(E_H,\mathcal{O}_x)$ is also
non-zero.
\end{proof}
We deduce that $\operatorname{Hom}(E_H,F)=0$ and hence $\dim\operatorname{Hom}(F,F)=1$.
Now we can conclude that $\dim\operatorname{Ext}^1(F,F)=d$.
Next we consider two distinct points $x$ and $y$ of $X$ and the two
associated kernels $F_x$ and $F_y$. Since $\operatorname{Ext}^i(\mathcal{O}_x,\mathcal{O}_y)=0$ for
all $i$ and $F_y$ is locally-free away from $x$ we can conclude from
the double exact sequence associated to the two sequences for $F_x$
and $F_y$, that $\operatorname{Hom}(F_x,F_y)\cong\operatorname{Hom}(E_H,F_y)=0$ and
$\operatorname{Ext}^1(F_x,F_y)\cong\operatorname{Ext}^1(E_H,F_y)$ which is also zero.
The following generalises Corollary 2.12 of \cite{MukTata}.
\begin{prop}\label{p:exti}
If $E$ is a simple rigid vector bundle and $d>3$ then there are
natural isomorphisms
\[\operatorname{Ext}^i(F_x,F_y)\cong\operatorname{Ext}^i(\mathcal{O}_x,\mathcal{O}_y)\oplus\operatorname{Ext}^i(E_H,E_H)\]
for all $x,y\in X$ (not necessarily distinct) and $1<i<d-1$.
\end{prop}
\begin{proof}
The proof uses the double exact sequence
we considered above. Start at $i=2$ and observe that
$\operatorname{Ext}^n(E_H,F_y)\cong\operatorname{Ext}^n(E_H,E_H)$ for $1\leq n<d$ (the case
$n=1$ follows because $E$ is rigid) and there is a
natural injection of $\operatorname{Ext}^n(E_H,E_H)$ into $\operatorname{Ext}^n(F_x,E_H)$. We also
have $\operatorname{Ext}^n(F_x,\mathcal{O}_y)\cong\operatorname{Ext}^{n+1}(\mathcal{O}_x,\mathcal{O}_y)$ and so the map
$g:\operatorname{Ext}^n(F_x,E_H)\to\operatorname{Ext}^n(F_x,\mathcal{O}_y)$ is given by the composite
\[\operatorname{Ext}^n(F_x,E_H)\to\operatorname{Ext}^{n+1}(\mathcal{O}_x,E_H)\to\operatorname{Ext}^{n+1}(\mathcal{O}_x,\mathcal{O}_y)\to\operatorname{Ext}^n(F_x,\mathcal{O}_y).\]
But $\operatorname{Ext}^{n+1}(\mathcal{O}_x,E_H)=0$ and so
the composite vanishes for $n=1,\dots,d-1$. Moreover, the surjection
$\operatorname{Ext}^n(F_x,F_y)\to\operatorname{Ext}^n(F_x,E_H)$ splits naturally since the image
is \[\operatorname{Ext}^n(E_H,E_H)\cong\operatorname{Ext}^n(E_H,F_y)\] and the image of this in
$\operatorname{Ext}^n(F_x,F_y)\to\operatorname{Ext}^n(E_H,E_H)$ is the identity.
\end{proof}
This shows that $\{F_y\}$ is a strongly simple family. Using Theorem
\ref{t:TB}, we have an alternative proof of
\begin{thm}[\cite{SeidelThomas}, \cite{PloogThesis}] If $E$ is a spherical bundle on a Calabi-Yau $d$-fold
$X$ then the moduli space of sheaves $\{F_x\}$ constructed above is
naturally isomorphic to $X$ and gives rise to a non-trivial Fourier-Mukai
transform $D(X)\to D(X)$.
\end{thm}
\section{Recovering the Strongly Spherical Collection}
\label{s:recover}
We shall now consider the reverse process: given a Fourier-Mukai
transform determined by a family of non-locally-free torsion-free
sheaves $\{F_y\}$ with dimension 0 singularity sets, can we
find a strongly spherical collection of bundles
$\Gamma=\{E_i\}_{i=0}^n$ such that $F_x$ is the kernel of the
canonical map
$\bigoplus_{i=0}^nE_i\otimes\operatorname{Hom}(E_i,\mathcal{O}_x)\to \mathcal{O}_x$? We shall see that this is indeed
possible. The first observation we need to make is
that the parameter space $\{F_y\}$ is naturally (isomorphic to) $X$. This is immediate
since the map $F_y\to F_y^{**}$ has quotient $\mathcal{O}_T$ and we see that
the parameter space $Y$ sits inside a space of kernels
$F_y^{**}\to\mathcal{O}_T$ as $T$ varies in $\operatorname{Hilb}^{|T|}(X)$.
Since the moduli space must be
complete we see that the map $Y\to X$ given by the singularity of
$F_y$ is an isomorphism. We also see that $F_y^{**}=F_{y'}^{**}$ for
any pair $y$ and $y'$. We shall write $F$ for $F_y^{**}$. Since $F$ is
locally-free away from $x$ and from $y$ we see that $F$ is
locally-free over the whole of $X$. Without loss of generality we
assume in what follows that the isomorphism $Y\cong X$ is the identity.
Using the double exact sequence from the previous section we can
immediately conclude that
$\dim\operatorname{Hom}(F,F)=\operatorname{rk}(F)$ and $\operatorname{Hom}(F,F)\cong\operatorname{Hom}(F,\mathcal{O}_x),$
for any $x\in X$. We can also conclude that $\operatorname{Ext}^i(F,F)=0$ for $i=1,\dots,d-1$.
If $\operatorname{rk}(F)=1$ then $F$ must be exceptional. Assume now that
$\operatorname{rk}(F)>1$. We observe also that the kernels of a suitable family of
maps $\lambda_x:F\to\mathcal{O}_x$, as $x$
varies, generate the family $\{F_x\}$. Since $\dim\operatorname{Hom}(F,F)>1$ we can
find an endomorphism of $F$ which has rank less than $r$ and so we have a
sheaf $P$ which factors such an endomorphism. We can assume $P$ is
reflexive by factoring the torsion out of $F/P=Q$, say. We now consider the
double exact sequences associated to pairs of short exact sequences
taken from
\[\begin{split}0\to F_x\to &\;F\to\mathcal{O}_x\to 0,\\
0\to P\to &\; F\to Q\to 0 \text{\qquad and}\\
0\to K\to &\;F\to P\to 0.
\end{split}
\]
From these it follows that $\operatorname{Hom}(P,F_x)=0$ and
$\operatorname{Hom}(Q,F_x)=0$. It follows from this that $\operatorname{Ext}^1(Q,F_x)=0$ and, crucially,
$\operatorname{Hom}(Q,F)=\operatorname{Hom}(Q,\mathcal{O}_x)$ and $\operatorname{Hom}(P,F)=\operatorname{Hom}(P,\mathcal{O}_x)$. These imply
that both $P$ and $Q$ are locally-free.
We now appeal to the following useful technical result (true in much
greater generality for suitable objects in any noetherian abelian category).
\begin{lemma} If $E$ is a torsion-free sheaf which is not simple then
there exists a simple sheaf $G$ (not necessarily unique) and an injection
$\alpha:G\hookrightarrow E$ and a surjection $\beta:E\twoheadrightarrow G$ such
that either $\beta\alpha$ is zero or the identity. Moreover, if $G\to
E$ is any non-zero map then it must inject.
\end{lemma}
\begin{proof}
Since $E$ is not simple, we can consider the set of sheaves $G$ which
factor non-isomorphisms $E\to E$. Such a sheaf $G$ is automatically
torsion-free and gives rise to maps $\alpha$ and $\beta$. The set is
partially ordered by compositions $E\twoheadrightarrow
G\twoheadrightarrow G'\hookrightarrow E$. Since
$r(G')<r(G)$ (otherwise the kernel of $G\to G'$ would be a torsion
sheaf), we can pick (using Zorn's Lemma) a minimal element with respect to this order. Call
it $G$. Then $G$ is simple since otherwise we could factor a map $G\to
G$ via $G'$ which would be strictly smaller than $G$ in the order. Now
the composite $\beta\alpha$ is either zero or a multiple of the
identity (in which case we replace $\beta$ with a suitable multiple).
The last statement follows because if such a map is not injective then
the image would be strictly smaller in the order.
\end{proof}
Applying this to our current situation we may assume $P$ is simple and
is minimal with respect to the ordering of the proof above.
Moreover, any (non-zero) map $P\to F$ must inject.
We now repeat this construction in a family. Suppose, as in the
previous section, that $\mathbb{E}$ is the universal sheaf corresponding to
the family $\{F_y\}$ and consider $S=\mathbb{E}^{**}/\mathbb{E}$. Since, $F_y$ is
singular only at $y$ we have that $S|_{X\times\{y\}}=\mathcal{O}_y$ and so
(wlog) $S$ is supported on the diagonal $\Delta\subset X\times X$ and is
locally-free there. If we twist by $\pi_2^*(\pi_{2*}S)^*$ then we may
assume without further loss of generality that $S=\mathcal{O}_\Delta$.
Observe that $\mathbb{E}^{**}$ is flat over both
projections and has the property that $\mathbb{E}^{**}|_{X\times\{y\}}=F$ for
all $y\in X$ and so is locally-free. Observe we have a diagram of
natural transformations of functors
\[\Phi_\mathbb{E}\longrightarrow\Phi_{\mathbb{E}^{**}}\longrightarrow \mathrm{Id}\]
This diagram has the
property that for any object $G\in D(X)$ there is a distinguished
triangle
\begin{equation}\Phi_\mathbb{E} G\longrightarrow\Phi_{\mathbb{E}^{**}}G\longrightarrow G,\label{eq-tri}\end{equation}
which is natural in $G$. Since $\Phi_\mathbb{E} F=F[-d]$ we see that
$F\to \Phi_\mathbb{E} F[1]$ is zero and so
$\Phi_{\mathbb{E}^{**}}(F^*)\cong F^*\oplus
F^*[-d]$. Hence, $\mathbb{E}^{**}|_{\{x\}\times X}\cong F^*$.
\begin{lemma} In the given situation, $\Phi^0_{\mathbb{E}^{**}}(P^*)\cong
P^*$.
\end{lemma}
\begin{proof}
By the semi-continuity of direct images
$\Phi^0_{\mathbb{E}^{**}}(P^*)$ is locally-free of rank
$r(P)$. We also have $\operatorname{Hom}(P,F_y)=0=\operatorname{Ext}^1(P,F_y)$ and so $\Phi^0_\mathbb{E}(P^*)=0=\Phi^1_{\mathbb{E}}(P^*)$.
Then the cohomology of the triangle~(\ref{eq-tri}) provides the
required isomorphism.
\end{proof}
If we use the Leray-Serre spectral sequence for $\pi_2$ we see that
\begin{align*}
H^0((P^*\boxtimes
P)\otimes\mathbb{E}^{**})&\cong H^0(R^0\pi_{2*}(\pi_1^*P^*\otimes\mathbb{E}^{**})\otimes P)\\
&\cong H^0(R^0\Phi_{\mathbb{E}^{**}}(P^*)\otimes P)\\
&\cong H^0(P^*\otimes P).\end{align*}
So we have natural isomorphisms $H^0((P^*\boxtimes
P)\otimes\mathbb{E})\cong\operatorname{Hom}(P^*,P^*)\cong\mathbf{C}\langle\operatorname{id}\rangle$
and dually we also have $H^0((P\boxtimes
P^*)\otimes\mathbb{E})\cong\operatorname{Hom}(P^*,P^*)$.
We can conclude that there are unique maps (up to scalars) $\alpha:P\boxtimes
P^*\to\mathbb{E}$ and $\beta:\mathbb{E}\to P\boxtimes P^*$. If we apply
$R^0\pi_{2*}\raise2pt\hbox{$\scriptscriptstyle\circ$} (P^*\boxtimes P)\otimes(-)$ to these maps we obtain the
maps $\alpha'$ and $\beta': P^*\otimes P\to P^*\otimes P$. But
$\alpha'|_\mathcal{O}$ has image $\mathcal{O}$ and $\beta'$ is non-zero on this copy
of $\mathcal{O}$ (corresponding to the identity element in $P^*\otimes
P$). Hence, $\beta'\raise2pt\hbox{$\scriptscriptstyle\circ$}\alpha'$ is not zero and so
$\beta\raise2pt\hbox{$\scriptscriptstyle\circ$}\alpha$ is also not zero. But $P$ is simple and thus
$P\boxtimes P^*$ is also simple (using the Leray-Serre spectral sequence
again). Consequently, $\beta\raise2pt\hbox{$\scriptscriptstyle\circ$}\alpha$ is the identity map. This
implies that $\mathbb{E}^{**}=(P\boxtimes P^*)\oplus Q$ for some vector
bundle $Q$. It also follows that $P$ is spherical as it is a direct
summand of $F$.
But now, $Q$ enjoys the same properties as $\mathbb{E}^{**}$ and again we can
choose a simple $P'$ such that $Q=(P'{}^*\boxtimes P')\oplus Q'$. Repeating,
we have $\mathbb{E}^{**}=\bigoplus_{i=1}^n \mathbb{E}_i$, where $\mathbb{E}_i\cong P_i\boxtimes
P_i^*$ and $P_i$ are spherical bundles. Observe that the uniqueness
of $\alpha$ and $\beta$ imply that $\operatorname{Ext}^k(P_i,P_j)=0$ for all $k$ and
$i\neq j$.
We have thus proved:
\begin{thm} Let $X$ and $Y$ be (smooth) Calabi-Yau $d$-folds. If
$\mathbb{F}\to X\times Y$ is a family of properly
torsion-free sheaves over $X$ parametrized by $Y$ with 0-dimensional
singularity sets and $\Phi_\mathbb{F}$ is a Fourier-Mukai Transform then
\begin{enumerate}
\item there is an isomorphism $\phi:Y\to X$ and
\item there exists a unique strongly spherical collection of bundles
$\Gamma=\{P_i\}_{i=0}^n$ on $X$ such that $(1\times\phi)^*\Phi_\Gamma=\Phi_\mathbb{F}$.
\end{enumerate}
\end{thm}
In the case of a K3 surface, if $\operatorname{Pic} X=\mathbb{Z}\langle h\rangle$ then
strongly spherical collections can only have cardinality $1$. This can
be easily seen from the numerical invariants of such a collection. In
that case, we recover Yoshioka's result (\cite{YoshAb}) that a family of properly torsion-free
sheaves giving rise to an FM transform arises from a spherical
object. But in general, this will not be the case. For example, if $L$
is a line bundle on a K3 surface whose sheaf cohomology vanishes in
every degree then $\{\mathcal{O}_X,L\}$ is a strongly spherical collection.
\section*{Acknowledgements}
The author would like to thank Will Donovan for useful comments and
Tom Bridgeland and Richard Thomas for several helpful
suggestions on an early draft of the paper.
\bibliographystyle{amsplain}
|
{
"timestamp": "2012-06-27T02:04:51",
"yymm": "1206",
"arxiv_id": "1206.6019",
"language": "en",
"url": "https://arxiv.org/abs/1206.6019"
}
|
\section{Introduction}
A sequence $p_0 = 1, p_1, p_2, \dots$ of polynomials is a polynomial sequence of binomial type if it satisfies the identity
\begin{equation}
\left(\sum_{i=0}^{\infty}p_i(1)x^i\right)^n = \sum_{i=0}^{\infty}p_i(n)x^i
\end{equation}
Binomial-type sequences were introduced by Rota, Kahaner, and Odlyzko in 1975 \cite{RKO} and play an important role in the theory of umbral calculus. Outside of the context of umbral calculus, polynomial sequences of binomial type possess the useful property that they are completely determined by the sequence of their values when evaluated at a single point. Several important polynomial sequences, such as the Abel polynomials and the Touchard polynomials, are of binomial type.
In this paper we demonstrate that polynomial sequences of binomial type arise from a large class of problems occurring in graph theory. In particular, these sequences occur in problems where we wish to enumerate the number of ways to place some objects on a ``toroidal'' periodic structure such that no two overlap. One simple example of this phenomenon is the following. Take an $n \times n$ chessboard and identify opposite edges to make it toroidal. If we let $p_k(n^2)$ equal the number of ways to place $k$ dominoes on this grid (aligned with the grid's edges) such that no two dominoes overlap, then it turns out that, for sufficiently large $n$, the values $p_k(n^2)$ are given by a polynomial in $n^2$. Moreover, this sequence of polynomials (viewed as a sequence in $k$) is a polynomial sequence of binomial type.
Our methods allow us to easily generalize these results. The main result of our paper is a generalization of the above phenomenon to arbitrary sets of polyominoes on toroidal grids of any dimension. We also demonstrate some interesting further generalizations of this result; for example, we show that the same binomial-type relation holds when we can assign arbitrary integer weights to polyominoes and then count placements that have a total weight of $k$. In addition, we show that there is a very natural continuous analogue of these results concerning placing arbitrary bounded measurable `shapes' in a continuous $d$-dimensional torus.
Finally, we apply these results to provide a proof of an open problem due to Stanley \cite{St86} concerning coefficients of the chromatic polynomial $\chi_n(x)$ of the two-dimensional toroidal grid graph. Much previous research into the chromatic polynomial $\chi_n(x)$ of toroidal grid graphs focused primarily on the asymptotics of this polynomial, particularly the limit $\lim_{n \rightarrow \infty} (\chi_n(x))^{1/n^d}$ (see, for instance \cite{Ba71, Ba82, Bi75, CS02, CS04, KE79, Lieb, Na71}). For example, it is known that for $d = 2$ and $x = 3$, this limit is equal to $(4/3)^{3/2}$.
The open problem due to Stanley asks to show that the coefficient of $x^{n^2-k}$ in $\chi_n(x)$ is, up to sign and for sufficiently large $n$, a polynomial in $n^2$, and that the polynomials for different $k$ form a polynomial sequence of binomial type. By using Whitney's broken-circuit theorem to reduce this to an overlap problem of the style above, we resolve this open problem (and in fact, provide a generalization that holds for chromatic polynomials of toroidal grid graphs of any positive dimension).
Our paper is structured as follows. In Section \ref{definitions}, we define some terminology that we use throughout this paper. In Section \ref{polynomial}, we prove that the problem of enumerating the number of non-overlapping placements does in fact give rise to a polynomial for sufficiently large $n$. We additionally show how to write these polynomials in a nice form reminiscent of certain generating functions. In Section \ref{ischemes}, we introduce the notion of an intersection schema and use it to prove our main theorem. In Section \ref{generalizations}, we discuss generalizations of our main result to the cases of assigning integer weights to polyominoes, continuous tori, non-toroidal grids, and other types of lattices. Finally, in Section \ref{chrom}, we apply our main result along with Whitney's broken-circuit theorem to solve the open problem mentioned above.
\section{Background and Definitions}\label{definitions}
We begin with some graph-theoretic notation. We let $C_n$ denote the cycle graph on $n$ vertices, $P_n$ denote the path graph on $n$ vertices, and $C_{\infty}$ denote the doubly infinite path graph.
\begin{definition}
Given two graphs $G_1$ and $G_2$, we define the \textit{product graph} $G_1 \times G_2$ as follows. The vertices of $G_1 \times G_2$ are given by ordered pairs $(v_1, v_2)$ where $v_1 \in G_1$ and $v_2 \in G_2$. The two vertices $(v_1, v_2)$ and $(v'_1, v'_2)$ are adjacent if either $v_1 = v'_1$ and $v_2$ is adjacent to $v'_2$ in $G_2$ or $v_2 = v'_2$ and $v_1$ is adjacent to $v'_1$ in $G_1$. We write $G^r$ for the expression $G \times G \times \dots \times G$ (with $r$ copies of $G$).
\end{definition}
\begin{definition}
The \textit{$d$-dimensional toroidal grid graph of size $n$}, $T^{d}_n$, is the graph $(C_n)^d$. Similarly, the \textit{$d$-dimensional infinite toroidal grid graph} $T^{d}_{\infty}$ is the graph $(C_{\infty})^d$, and the \textit{$d$-dimensional grid graph of size $n$}, $L^{d}_n$, is the graph $(P_n)^d$.
\end{definition}
\begin{definition}
A \textit{$d$-dimensional figure} is a finite subset of vertices of $T^{d}_{\infty}$, up to translation. That is, two figures are considered equivalent if we can get from one to the other by adding a fixed integer vector to the coordinates of each of its vertices. We say a figure is of size $s$ if it contains $s$ vertices. We also say a figure is of girth $g$ if the maximum coordinate difference between two vertices in the figure is equal to $g$.
\end{definition}
We have defined figures above as subsets of $T^{d}_\infty$. However, it is clear that, for any specific figure, if $n$ is large enough (in particular, larger than the girth of the figure), then we can also view the figure as a subset of $T^{d}_n$ (up to translation). We will often abuse notation in this way by talking about ``placing'' figures on $T^{d}_n$. In such cases, we will always assume that we are taking $n$ large enough so that this makes sense.
We next define what it means for a sequence of polynomials to be of binomial type.
\begin{definition}
The sequence of polynomials $\lbrace p_i(n)\rbrace_{i\geq 0}$ is of \textit{binomial-type} if it satisfies the following three properties: i. $p_0(n) = 1$, ii. $p_i(0) = 0$ for $i>0$, and iii. the identity given by equation (\ref{eqbintype}) below holds for all nonnegative $n$.
\begin{equation}\label{eqbintype}
\left(\sum_{i=0}^{\infty}p_i(1)x^i\right)^n = \sum_{i=0}^{\infty}p_i(n)x^i
\end{equation}
An equivalent reformulation of our third condition is that the identity given by equation (\ref{eqbintype2}) below holds for all nonnegative $n$.
\begin{equation}\label{eqbintype2}
p_n(x+y) = \sum_{i=0}^{n}p_i(x)p_{n-i}(y)
\end{equation}
\end{definition}
Our definition of binomial-type differs slightly from the definition most often found in the literature, where equation (\ref{eqbintype2}) contains an additional factor of $\binom{n}{i}$. The two definitions are easily interchangeable, however; if $\lbrace p_i(n)\rbrace$ is a sequence of binomial-type under our definition, then $\lbrace i!p_i(n) \rbrace$ is a sequence of binomial-type under the traditional definition.
\section{Polynomiality}\label{polynomial}
In this section, we demonstrate that several sequences related to the number of ways to place a fixed set of figures on a lattice are eventually described by polynomials. More specifically, we have the following main result.
\begin{theorem}\label{polymain}
Let $S$ be a finite multiset of $d$-dimensional figures. Let $f_{S}(n)$ be the number of ways to place all of the figures in $S$ on $T^{d}_n$ such that none overlap (for a finite set of small values of $n$, there may be figures that are impossible to place on $T^{d}_n$; in this case, let $f_{S}(n) = 0$). Then there exists a positive integer $n_0$ and an integer polynomial $p(x)$ such that $f_{S}(n) = p(n^d)$ for all $n \geq n_0$.
\end{theorem}
Since there is some subtlety in dealing with multisets containing repeated indistinguishable figures, in the first half of this section (Subsection \ref{norepeats}) we prove this result only for sets of distinct figures. In the second half (Subsection \ref{yesrepeats}), we generalize to the case where repeats of figures are allowed.
\subsection{Without repeats}\label{norepeats}
In this subsection, we prove Theorem \ref{polymain} for the case where $S$ contains no repeated figures. In particular, we prove the following simpler result.
\begin{theorem}\label{polymainredux}
Let $S$ be a finite set of \textbf{distinct} $d$-dimensional figures. Let $f_{S}(n)$ be the number of ways to place all of the figures in $S$ on $T^{d}_n$ such that none overlap (for a finite set of small values of $n$, there may be figures that are impossible to place on $T^{d}_n$; in this case, let $f_{S}(n) = 0$). Then there exists a positive integer $n_0$ and an integer polynomial $p(x)$ such that $f_{S}(n) = p(n^d)$ for all $n \geq n_0$.
\end{theorem}
Throughout this subsection and the next, we will repeatedly make use of the following notion of an overlap graph.
\begin{definition}
An \textit{overlap graph} is a graph $G$ whose vertices are labelled by $d$-dimensional figures (for some $d$). A placement of these figures on $T^{d}_n$ (or $T^{d}_{\infty}$) is \textit{consistent} with $G$ if, whenever figures $f_1$ and $f_2$ are adjacent in $G$, they overlap in $T^{d}_n$ (or $T^{d}_{\infty}$).
\end{definition}
Note that if a placement of figures is consistent with an overlap graph, so are all translations of this placement of figures. This inspires the following definition.
\begin{definition}
A \textit{configuration} of $d$-dimensional figures is a placement of $d$-dimensional figures on $T^{d}_n$ (or $T^{d}_{\infty}$) where two configurations are equivalent if they are translations of each other in $T^{d}_{n}$ (or $T^{d}_{\infty}$). A configuration $c$ is \textit{consistent} with a graph $G$ if any of its placements are consistent with $G$; in this case, we write $c \unlhd G$.
\end{definition}
If $f_1$ and $f_2$ are not adjacent in $G$, they may or may not overlap in $T_n^{d}$ (or $T^{d}_{\infty}$); only one direction of the above implication holds. In addition, for now we will assume that the vertices of our overlap graphs are labelled with distinct $d$-dimensional figures; we will lift this constraint in the following subsection.
We next prove three useful lemmas about overlap graphs.
\begin{lemma}\label{polylem1}
If an overlap graph $G$ is connected, then, there are only finitely many configurations of these figures on $T^{d}_{\infty}$ consistent with $G$. We call this number $v(G)$.
\end{lemma}
\begin{proof}
Since $G$ is connected, there exists a path of edges of $G$ between any two vertices of $G$. This implies that, for any two figures $f_1$ and $f_2$ in a consistent placement of these figures on $T^{d}_{\infty}$, we can construct a sequence of figures starting at $f_1$ and ending at $f_2$ such that each figure intersects the next figure in the sequence.
Now, since each of the figures has finite size, and since there are a finite number of figures, this implies that the maximum distance (along edges of the graph) between any two points belonging to figures in our placement is bounded. Since there are only finitely many ways to place a finite number of figures in a bounded region of $T^{d}_{\infty}$, this establishes that $v(G)$ is finite.
\end{proof}
\begin{lemma}\label{polylem2}
Let $G$ be a connected overlap graph. Then the number of placements of figures on $T^{d}_{n}$ consistent with $G$ is equal to $v(G)n^d$ for sufficiently large $n$.
\end{lemma}
\begin{proof}
By Lemma \ref{polylem1}, we know that there are $v(G)$ distinct consistent configurations of these figures on $T^{d}_{\infty}$. For sufficiently large $n$, it will be possible to embed each of these $v(G)$ configurations in $T^{d}_{n}$. Finally, for each choice of consistent configuration, there are $n^d$ possible translations in $T^{d}_{n}$. Therefore, there are a total of $v(G)n^d$ consistent placements on $T^{d}_n$ for sufficiently large $n$.
\end{proof}
\begin{lemma}\label{polylem3}
Let $G$ be an overlap graph with connected components $G_1, G_2, \dots, G_r$. Then, the number of placements of figures on $T^{d}_n$ consistent with $G$ is equal to $v(G_1)v(G_2)\dots v(G_r)n^{rd}$.
\end{lemma}
\begin{proof}
We first note that we can treat all the connected components ``independently''. More specifically, for each $i$ let $P_i$ be a placement of figures on $T^{d}_n$ consistent with $G_i$. Then the placement of figures given by the union of the $P_i$ is a placement consistent with $G$ (and moreover, all consistent placements with $G$ can be written uniquely in such a way). This follows directly from the fact that, since there are no edges between figures belonging to different connected components of $G$, there are also no overlap constraints they must satisfy (in addition to the fact that all of our figures are distinct, so we can identify which connected component of $G$ they must belong to).
Therefore, since by Lemma \ref{polylem2}, there are $v(G_i)n^{d}$ placements on $T^{d}_n$ consistent with $G_i$ (for sufficiently large $n$), overall there will be
\begin{equation}
\prod_{i=1}^{r}v(G_i)n^{d} = \left(\prod_{i=1}^{r}v(G_i)\right)n^{rd}
\end{equation}
\noindent
placements of figures on $T^{d}_n$ consistent with $G$ (for sufficiently large $n$), as desired.
\end{proof}
With these lemmas, the proof of Theorem \ref{polymainredux} reduces to a straightforward application of the principle of inclusion-exclusion.
\begin{proof}[\textbf{Proof of Theorem~\ref{polymainredux}}]
Label the figures in $S$ as $f_1, f_2, \dots, f_m$. We wish to count the number of placements of these figures on $T^{d}_n$ such that no two figures overlap. Thus, for each pair $1 \leq i < j \leq m$, let $E(i,j)$ be the set of placements of these figures where $f_i$ and $f_j$ intersect, and let $U$ be the set of all placements of these figures. Finally, for convenience of notation, let $\mathcal{P}$ be the set of all pairs $(i,j)$ where $1\leq i < j \leq m$; if $p = (i,j)$, we will also let $E(p)$ stand for $E(i,j)$.
Then, by the principle of inclusion-exclusion, the number of placements where no two figures overlap is equal to
\begin{equation}\label{pieeq}
|U| - \sum_{p_1\in\mathcal{P}}|E(p_1)| + \sum_{\{p_1, p_2\} \subseteq \mathcal{P}}|E(p_1)\cap E(p_2)| - \sum_{\{p_1,p_2,p_3\} \subseteq \mathcal{P}}|E(p_1)\cap E(p_2) \cap E(p_3)| + \cdots
\end{equation}
Hence, to show that this is eventually a polynomial in $n^d$, it suffices to show that each of the individual terms is eventually a polynomial in $n^d$. Now, $|E(p_1)\cap E(p_2) \cap \dots \cap E(p_k)| = |E(i_1, j_1) \cap E(i_2, j_2) \cap \dots \cap E(i_k, j_k)|$ is equal to the number of placements where figure $f_{i_r}$ intersects figure $f_{j_r}$ for each $r$ between $1$ and $k$. But this is simply equal to the number of placements consistent with the overlap graph $G$ which contains an edge between $f_{i_r}$ and $f_{j_r}$ for each $r$ between $1$ and $k$. By Lemma \ref{polylem3}, this is eventually a polynomial in $n^d$.
We also have the term $|U|$, consisting of all possible placements. But this is also just equal to the number of placements consistent with the overlap graph $G$ containing no edges, so once again by Lemma \ref{polylem3}, this is also a polynomial in $n^d$ (in fact, we have that $|U| = (n^{d})^m$). This concludes the proof.
\end{proof}
For a connected overlap graph $G$, let $a(G) = (-1)^{|E|}v(G)$. By substituting values into Equation \ref{pieeq} from Lemma \ref{polylem3}, we have the following corollary.
\begin{corollary}
Let $N = n^d$. Then, for sufficiently large $n$, the function $f_{S}(n)$ defined in Theorem \ref{polymainredux} is equal to
\begin{equation}\label{isform}
\sum_{r = 1}^{m}\sum \dfrac{1}{r!}a(g_1)a(g_2)\cdots a(g_r)N^r
\end{equation}
\noindent
where the inner sum is over all \textbf{ordered} $r$-tuples of connected overlap graphs that union to an overlap graph for the set $S$ (equivalently, the union of the sets of figures corresponding to the vertices of the $g_i$ is equal to the set $S$).
\end{corollary}
Note that since $S$ contains only distinct figures, we could easily remove the factor of $1/r!$ in equation (\ref{isform}) and instead sum over all unordered $r$-tuples. However, for reasons to be explained in Section \ref{ischemes}, it is more convenient to write our polynomial in this form.
\subsection{With repeats}\label{yesrepeats}
In the previous section, we proved Theorem \ref{polymain} for the specific case where $S$ contained no repeated indistinguishable figures. In the case that we have several of the same figure, certain details in the above proof (in particular, Lemma \ref{polylem3}) fail to hold. For example, if our set $S$ contains two identical figures, the number of total possible placements is no longer $n^{2d}$ (nor is it $n^{2d}/2!$, since this is not even always an integer). Instead, it is equal to $(n^{2d} + n^{d})/2$; the extra $n^d$ term arises from the fact that our two indistinguishable figures can occupy exactly the same location.
However, if we have repeated \textit{distinguishable} figures, all of the logic in the previous section continues to hold. For instance, if we have two identical figures, but of which one is colored red and the other blue, then there are again $n^{2d}$ possible placements of these two figures. This observation gives rise to a simple proof of Theorem \ref{polymain}.
\begin{proof}[\textbf{Proof of Theorem~\ref{polymain}}]
Assume that $S$ contains $c_i$ copies of figure $f_i$, for each $1 \leq i \leq m$.
For each group of indistinguishable repeated figures in $S$, ``color'' them to make them distinguishable. Then the proof of Theorem \ref{polymainredux} implies that the number of ways to place these figures on $T^{d}_{n}$ such that no two figures overlap is eventually some polynomial $p(n^d)$ for large enough $n$.
But now, if we ignore the different colors, each configuration where no two figures overlap is counted exactly $c_1!c_2!\dots c_m!$ times (keep in mind that, since figures cannot overlap in these configurations, they cannot occupy exactly the same location). Therefore in the case of indistinguishable repeated figures, the number of placements of these figures such that no two figures overlap is eventually $p(n^d)/(c_1!c_2!\dots c_m!)$, which is also a polynomial in $n^d$.
\end{proof}
For reasons that will be explained in the next section, we would also like to write this polynomial in the same form as equation (\ref{isform}). In order to construct the correct function $a(g)$, we must introduce some more notation.
\begin{definition}\label{Osets}
For any configuration $c$, we can partition the figures of $c$ into $k$ maximal sets $O_i$ such that all the figures in $O_i$ are identical and overlap completely. Then the \textit{weight} $w_c$ of a configuration $c$ is defined to equal $\prod_{i=1}^{k}(o_i!)^{-1}$, where $o_i = |O_i|$.
\end{definition}
\begin{definition}\label{alphadef}
In a connected overlap graph $G$, assume that there are a total of $c_i$ (vertices labeled with) figures of type $f_i$ for each $1 \leq i \leq m$. Then we define $\alpha(G) = |\mathrm{Aut}(G)|^{-1}\prod_{i=1}^{m}c_i!$, where $\mathrm{Aut}(G)$ is the group of automorphisms of the graph $G$ that preserve labelling (that is, they send vertices labelled with figures of type $f_i$ to vertices labelled with figures of type $f_i$).
\end{definition}
Note that if we let $H$ be the group of all permutations of the vertices of $G$ which send figures of type $f_i$ to figures of type $f_i$, then $\alpha(G)$ can be equivalently defined as $|H|/|\mathrm{Aut}(G)|$. Equivalently, this is just the number of non-isomorphic ways to color each set of $c_i$ figures of type $f_i$ with $c_i$ distinguishable colors. We will make use of this fact in the proof of the following theorem.
\begin{theorem}\label{mainform}
For a connected overlap graph $g$, define
\begin{equation} \label{genA}
a(g) = (-1)^{|E|}\alpha(g)\sum_{c \unlhd g}w_c
\end{equation}
\noindent
where the sum is over all configurations $c$ consistent with $g$. Then, as before, we have that
\begin{equation}\label{isform}
f_{S}(n) = \sum_{r = 1}^{m}\sum \dfrac{1}{r!}a(g_1)a(g_2)\cdots a(g_r)N^r
\end{equation}
\noindent
where the inner sum is over all \textbf{ordered} $r$-tuples of connected overlap graphs that union to an overlap graph for the set $S$.
\end{theorem}
\begin{proof}
Let $p$ be any placement of the figures of $S$ onto $T^{d}_n$. We will compute the number of times this placement is counted in the above sum, and show that this sum reduces to the similar sum that occurs in the case of completely distinguishable figures (as in Theorem \ref{polymainredux} above).
To do this, for our placement $p$ of the figures in $S$, as in Definition \ref{Osets}, partition $S$ into $k$ maximal sets $O_i$ such that all the figures in $O_i$ are identical and overlap completely (and let $o_i = |O_i|$). In addition, let there be $c_i$ figures of type $f_i$ for each $i$ between $1$ and $m$.
Next, note that by the definition of $\alpha(G)$, for any connected overlap graph $g$, we can write $a(g) = \sum a'(h)$, where the sum is over all graphs $h$ obtained by coloring all the figures of type $f_i$ distinguishably (note that there are $\alpha(g)$ such graphs). Our new function $a'(h)$ is now given just by $a'(h) = (-1)^{|E|}\sum_{c \unlhd h} w_c$ (where for a configuration $c$ to be consistent with $h$, it simply has to be consistent with the original graph $g$). We can therefore write $f_{S}(n)$ as
$$f_{S}(n) = \sum_{r = 1}^{m}\sum \dfrac{1}{r!}a'(h_1)a'(h_2)\cdots a'(h_r)N^r$$
\noindent
where this new sum is over ordered $r$-tuples of these additionally colored overlap graphs $h_i$. Now, let us consider the terms of this sum that contribute to the total count for our placement $p$. Specifically, we are looking at terms where some subset $p_i$ of $p$ is counted in the placements belonging to the term $a'(h_i)N$, and such that the union of all the $p_i$ is $p$. Let us write one such term as
$$t_p = \dfrac{1}{r!}w_{p_1}w_{p_2}\cdots w_{p_r}$$
\noindent
where $w_{p_{i}}$ is the weight of placement $p_i$ (which is the same as the weight of the configuration $c_i$ to which $p_i$ belongs). Now, note that the overlap sets $O_i$ for our overall placement $p$ are distributed among these $r$ placements. Therefore, for each $1 \leq j \leq r$, let $O_{ij}$ be the subset of $O_{i}$ that belongs to subplacement $p_j$, and let $o_{ij} = |O_{ij}|$. Note then that $w_{p_i} = (o_{1i}!o_{2i}!\dots o_{ki}!)^{-1}$. We thus have that
$$t_p = \prod_{i=1}^{k}\prod_{j=1}^{r} (o_{ij}!)^{-1}.$$
Next, note that $o_{i}!\prod_{j=1}^{r} (o_{ij}!)^{-1}$ is the number of ways to split $o_i$ distinguishable colors and for each $j$ assign $o_{ij}$ of these colors to the placement $p_j$. Therefore, whereas $t_p$ was counting the (weighted) number of terms where we could distinguish between identical figures in each connected overlap subgraph, if we multiply all of these terms by $\prod_{i=1}^{k}o_i!$, we will now be counting over terms where we can distinguish among sets of identical figures that overlap completely.
Next, note that since each set $O_i$ contains figures of the same type, the expression
$$\dfrac{\prod_{i=1}^{m}c_i!}{\prod_{i=1}^{k}o_i!}$$
\noindent
counts the number of ways to, for each $i$ between $1$ and $m$, split $c_i$ distinct colors and assign them to all the sets $O_i$ containing figures of type $f_i$. Therefore, if we again multiply all of these terms by $\prod_{i=1}^{m}c_i!/\prod_{i=1}^{k}o_i!$, we are now counting over terms where we can distinguish between any two figures; in particular, our sum is now exactly the same as it is in the above proof of Theorem \ref{polymain}.
Altogether, we have multiplied the terms we are considering by a net factor of
$$\left(\dfrac{\prod_{i=1}^{m}c_i!}{\prod_{i=1}^{k}o_i!}\right)\prod_{i=1}^{k}o_i! = \prod_{i=1}^{m}c_i!$$
But note that this does not depend on the specific placement $p$ we have chosen at all! Hence, we have shown that
$$f_{S}(n)\prod_{i=1}^{m}c_i! = f_{\bar{S}}(n)$$
\noindent
where $\bar{S}$ is constructed from $S$ by coloring all the figures so that they are distinguishable. As shown above (again in the proof of Theorem \ref{polymain}), $f_{\bar{S}}(n)/\prod_{i=1}^{m}c_i!$ is exactly $f_{S}(n)$, and therefore our proof is complete.
\end{proof}
\section{Intersection Schemas}\label{ischemes}
In the previous section, we showed that we can write the function $f_{S}(n)$ in the form given in equation (\ref{isform}). In particular, we have that
$$f_{S}(n) = \sum_{r = 1}^{m}\sum \dfrac{1}{r!}a(g_1)a(g_2)\cdots a(g_r)N^r$$
\noindent
where the function $a$ is given as in equation (\ref{genA}).
In this section, we will prove that a large class of functions written in this form give rise to polynomial sequences of binomial type. To do this, we will define an object called an \textit{intersection schema}, which will generalize many of the properties of overlap graphs we encountered in the previous section.
\begin{definition}
A \textit{weighted set} is a set $S$ (either finite or infinite) along with a weight function $w: S \rightarrow \mathbb{Z}^+$ (from $S$ to the positive integers) such that for any $W$, there are only finitely many elements $x$ of $S$ such that $w(x) \leq W$.
\end{definition}
\begin{definition}
Given a weighted set $S$, the set of $S$\textit{-labeled graphs} is the set of graphs where each vertex is labelled by an element in $S$. We shall denote this set as $LG(S)$. We define the weight $w(g)$ of an element $g$ of $LG(S)$ to simply be the sum of the weights of its labels. We will also denote the subset of $LG(S)$ consisting of connected $S$-labeled graphs as $LCG(S)$. Note that we consider two graphs in $LG(S)$ to be equivalent if they are equivalent under a graph isomorphism that sends labeled vertices to similarly labeled vertices.
\end{definition}
\begin{definition}\label{ISdef}
An \textit{intersection schema} is a weighted set $S$ along with a function $a: LCG(S) \rightarrow \mathbb{C}$. Given an intersection schema, we define the polynomial $q_i(N)$ as:
$$q_i(N) = \sum_{r = 1}^{\infty}\sum \dfrac{1}{r!}a(g_1)a(g_2)\cdots a(g_r)N^r$$
\noindent
where the inner sum is over all \textbf{ordered} $r$-tuples $(g_1,g_2,\dots,g_r)$ of elements of $LCG(S)$ such that $\sum_{j=1}^{r}w(g_j) = i$. By default, we set $q_0(N) = 1$.
\end{definition}
Finally, we will need the following binomial identity:
\begin{lemma}\label{binlemma}
We have that
$$n^k = \sum_{m_1+\dots+m_r = k}\dfrac{k!}{m_1!m_2!\cdots m_r!}\binom{n}{r}$$
\noindent
where the sum is over all compositions of $k$.
\end{lemma}
\begin{proof}
The left hand side is the number of ways to color a set of $k$ items with $n$ colors. The right hand side counts this same number; here the $m_i$ correspond to sizes of sets of items colored the same color, the first multinomial coefficient corresponds to the number of ways to distribute the $k$ items into these groups of size $m_i$, and the binomial coefficient corresponds to the number of ways to choose $r$ colors for these $r$ groups out of the total $n$ colors.
\end{proof}
We can now state and prove our main theorem.
\begin{theorem}\label{mainthm}
Let $\mathcal{I}$ be an intersection schema. We then have:
$$\left(\sum_{i=0}^{\infty}q_i(1)x^i\right)^N = \left(\sum_{i=0}^{\infty}q_i(N)x^i\right)$$
\end{theorem}
\begin{proof}
Let
$$F(x) = \sum_{i=0}^{\infty}q_i(1)x^i$$
\noindent
and let
$$F_N(x) = \sum_{i=0}^{\infty}q_i(N)x^i.$$
By the formula for $q_i(N)$ given in Definition \ref{ISdef}, we can rewrite $F(x)$ as:
$$F(x) = 1+\sum_{r=1}^{\infty}\sum \dfrac{1}{r!}a(g_1)a(g_2)\dots a(g_r)x^{w(g_1)+w(g_2)+\dots+w(g_r)}$$
\noindent
where the inner sum is over all ordered $r$-tuples $(g_1, g_2, \dots, g_r)$ of elements of $LCG(S)$.
We will now show by comparing terms that $F(x)^N = F_N(x)$. For sake of convenience, we will assume that the $a(g)$ are arbitrary non-commuting variables; i.e. that we do not necessarily have $a(g_1)a(g_2) = a(g_2)a(g_1)$ (of course, since $a(g_i) \in \mathbb{C}$, they do commute, but we will remove this restriction for now).
A general term in $F_n(x)$ looks like:
$$\dfrac{n^r}{r!}a(g_1)a(g_2)\dots a(g_r)x^{w(g_1)+w(g_2)+\dots+w(g_r)}$$
We will show that the coefficient of $a(g_1)a(g_2)\dots a(g_r)x^{w(g_1)+w(g_2)+\dots+w(g_r)}$ in $F(x)^n$ is also $\dfrac{n^r}{r!}$, thus completing the proof. To see this, first note that since we are assuming the $a(g)$s do not commute, the terms in $F(x)$ which could contribute to this coefficient in $F(x)^n$ are of the form
$$\dfrac{1}{j!}a(g_i)a(g_{i+1})\dots a(g_{i+j-1})x^{w(g_i)+w(g_{i+1})+\dots +w(g_{i+j-1})}$$
\noindent
(in other words, consecutive blocks of $a(g_i)$s). But now, it follows directly from expansion that the coefficient of
$$a(g_1)a(g_2)\dots a(g_r)x^{w(g_1)+w(g_2)+\dots+w(g_r)}$$
\noindent
in $F(x)^n$ is equal to
$$\sum_{m_1+\dots+m_s = r}\dfrac{1}{m_1!m_2!\dots m_s!}\binom{n}{s}$$
\noindent
which by Lemma \ref{binlemma} is simply equal to $\dfrac{n^r}{r!}$, as desired. (The ordered partitions arise from the different ways to divide the product $a(g_1)a(g_2)\dots a(g_r)$ into consecutive ``blocks'').
\end{proof}
\begin{remark}
Interestingly enough, this proof works even if the $a(g)$ variables do not commute, so it holds even when $a$ is a function from $LCG(S)$ to $GL_n(\mathbb{C})$ (or, more generally, to any associative algebra, since the argument uses only addition and multiplication of the $a(g)$). The author has not found any useful applications of this fact, however.
\end{remark}
We can now directly apply this theorem about intersection schemas to the case of non-overlapping placements.
\begin{theorem}\label{mainresult}
Let $S$ be a (possibly infinite) set of connected $d$-dimensional figures, and let $p_{k}(n^d)$ be the number of ways to place some collection of these figures (possibly using the same figure in $S$ repeatedly) that have a total of $k$ edges on $T^{d}_n$. Then the $p_{k}(n^d)$ are eventually polynomials in $n^d$, and these polynomials form a sequence of binomial type.
\end{theorem}
\begin{proof}
Define the following intersection schema. Our weighted set $S$ is just the set $S$ of $d$-dimensional figures, where the weight of a figure is simply its number of edges. The function $a$ is defined as in equation (\ref{genA}); note that graphs in $LCG(S)$ are just connected overlap graphs for some set of figures. Then Theorem \ref{mainform} shows that for each $k$, $p_{k}(n^d)$ is eventually equal to $q_{k}(n^d)$. Our main theorem about intersection schemas (Theorem \ref{mainthm}) then shows that the polynomials $q_{k}(n^d)$ form a sequence of binomial type, as desired.
\end{proof}
\section{Generalizations}\label{generalizations}
Up until now, this paper has been concerned only with placing $d$-dimensional figures on $d$-dimensional toroidal grid graphs. However, the machinery of intersection schemas and inclusion-exclusion on overlap graphs allow us to prove a much wider range of results. In fact, it seems that any problem involving placing finite non-overlapping collections of subgraphs on larger and larger periodic graphs gives rise to an eventual polynomial sequence; in addition, if the underlying periodic graphs are (in some sense) ``toroidal'', then this polynomial sequence is of binomial type. In this section, we will consider some generalizations of Theorems \ref{polymain} and \ref{mainresult} that capture this idea.
\subsection{Other weights}
In the proof of Theorem \ref{mainresult}, we assigned the weight of a figure to be its number of edges. However, since Theorem \ref{mainthm} works for any valid weight function, we can essentially assign whatever weights we want to figures (as long as not too many figures have small weight). We can formalize this in the following statement.
\begin{theorem}\label{mainresultwts}
Let $S$ be a (possibly infinite) set of connected $d$-dimensional figures, and let $w$ be a function from $S$ to $\mathbb{Z}^{+}$ such that for any $x$, there are only finitely many figures $f \in S$ such that $w(f) = x$. Let $p_{k}(n^d)$ be the number of ways to place some collection of these figures (possibly using the same figure in $S$ repeatedly) on $T^{d}_n$ such that the total weight of the placed figures is $k$. Then the $p_{k}(n^d)$ are eventually polynomials in $n^d$, and these polynomials form a sequence of binomial type.
\end{theorem}
For example, all the following polynomial sequences are polynomial sequences of binomial-type:
\begin{itemize}
\item
Let $p_{k}(n^2)$ be the number of ways to place $a$ L-shaped triominos and $b$ T-shaped pentominos on an $n \times n$ toroidal grid such that $7a+2b=k$. Then $p_{k}(n^2)$ is eventually a polynomial sequence of binomial type.
\item
Let $p_{k}(n^d)$ be the number of ways to place some number of $d$-dimensional figures on a $d$-dimensional toroidal grid graph such that the sum of the squares of the number of edges over all figures equals $k$. Then $p_{k}(n^d)$ is eventually a polynomial sequence of binomial type.
\item
Let $p_{k}(n^d)$ be the number of ways to place some number of $d$-dimensional figures on a $d$-dimensional toroidal grid graph such that the total number of edges in the figures equals $k$, and then to color each figure that has at least $3$ edges with one of $50$ colors. Then $p_{k}(n^d)$ is eventually a polynomial sequence of binomial type.
\end{itemize}
\subsection{Continuous variant}
We can easily adapt intersection schemas to handle a continuous variant of our problem. To do this, we replace the $d$-dimensional toroidal grid graph $T^{d}_n$ with a continuous $d$-dimensional torus of side length $n$, and the concept of a $d$-dimensional figure with a bounded measurable set in $d$-dimensional Euclidean space. Then instead of counting the number of ways to place some number of objects so that they do not overlap, we instead consider the total measure of non-overlapping placements in state space.
For the case where our collection of figures contains only one object, we have the following nice probabilistic result.
\begin{theorem}
Let $\mathcal{S}$ be a bounded measurable set in $d$-dimensional Euclidean space. Let $p_k(n^d)$ be the probability that no two copies intersect when we place $k$ copies of $\mathcal{S}$ independently and uniformly at random inside a $d$-dimensional torus of side-length $n$. Then $n^{dk}p_k(n^d)$ is eventually a polynomial for each $k$, and these polynomials form a sequence of binomial-type.
\end{theorem}
\subsection{Non-toroidal grids}
We can also ask what happens if, instead of placing our figures on the toroidal grid graph $T^{d}_n$, we place them on the regular grid graph $L^{d}_n$. It turns out that in this case we lose the binomial-type property. However, the number of possible placements is still a polynomial (in $n$ instead of $n^d$, however), and we therefore have the following analogue to Theorem \ref{polymain}.
\begin{theorem}\label{polymainnt}
Let $S$ be a finite multiset of $d$-dimensional figures. Let $f_{S}(n)$ be the number of ways to place all of the figures in $S$ on $L^{d}_n$ such that none overlap. Then there exists a positive integer $n_0$ and an integer polynomial $p(x)$ such that $f_{S}(n) = p(n)$ for all $n \geq n_0$.
\end{theorem}
\begin{proof}
We follow the proof of Theorem \ref{polymain}, with the slight change that for each configuration, instead of there being $n^d$ valid translations, there are only $(n-g_1)(n-g_2)\dots(n-g_d)$ valid translations, where $g_i$ is the extent of the configuration in dimension $i$ (that is, $g_i(c) = \max_{x, y \in c} |x_i - y_i|$).
\end{proof}
We can extend this even farther. By the same reasoning as in the proof of Theorem \ref{polymain}, the above result holds for grid ``rectangles'' with unequal dimensions (like $n \times 2n$ rectangles, or $4n \times 5n \times 6n$ boxes). In fact, we have the following general result:
\begin{theorem}\label{polymainntgen}
Let $G$ be a graph formed by taking a finite subset of the unit $d$-dimensional cells comprising $T^{d}_{\infty}$. Let $G^n$ be the graph obtained by replacing each unit cell by a cell of length $n$ divided regularly into $n^d$ unit cells. Let $S$ be a finite multiset of $d$-dimensional figures, and let $f_{S}(n)$ be the number of ways to place all of the figures in $S$ on $G^n$ such that none overlap. Then there exists a positive integer $n_0$ and an integer polynomial $p(x)$ such that $f_{S}(n) = p(n)$ for all $n \geq n_0$.
\end{theorem}
\begin{proof}
Again, by following the same reasoning as in the proof of Theorem \ref{polymain}, it suffices to show that the number of ways to place any one figure on $G^n$ is a polynomial in $n$.
To show this, call our figure $f$, and divide $G^n$ into copies of $L^{d}_n$ (in the same way that we can divide $G^1$ into unit $d$-dimensional cells). The number of ways to place $f$ in $L^{d}_n$ is a polynomial in $n$ (namely, the same polynomial used in the proof of Theorem \ref{polymainnt} above), so the total number of ways to place $f$ so that it stays entirely within one of these copies of $L^{d}_n$ is also a polynomial in $n$. Now, by similar reasoning, the number of ways this figure can intersect exactly $c$ of these $n$-dimensional cells is a polynomial in $n$ (since for each specific choice of $c$ cells, the number of ways this figure can intersect exactly those cells will be a polynomial in $n$). By summing all of these polynomials (and there are a finite number of these, since $G$ contains a finite number of unit cells), we find that the total number of ways to place $f$ in $G^n$ is a polynomial in $n$, as desired.
\end{proof}
\subsection{Other lattices}
Finally, the only discrete lattice we have considered is the square lattice. However, analogues of all of the above theorems exist for other lattices, such as triangular lattices and hexagonal lattices (and by exactly the same logic).
\section{Application to Chromatic Polynomials}\label{chrom}
The following open problem appears as Exercise 4.82 in \textit{Enumerative Combinatorics, vol. 1}.
\begin{theorem}\label{chrommain}
Let $\chi_n(x)$ be the chromatic polynomial of the $n\times n$ toroidal grid graph, and let $q_k(n^2)$ be the coefficient of $x^{n^2-k}$ in $\chi_n(x)$. Then $(-1)^kq_k(n^2)$ is eventually a polynomial in $n^2$, and this sequence of polynomials is of binomial-type.
\end{theorem}
In this section, we will provide a proof of this theorem, thus resolving this open problem. In addition, we will prove that the above claim holds not just for the $n \times n$ toroidal grid graph but for $T^{d}_n$, for any number of dimensions $d$.
To do this, we will reduce the problem of computing the coefficient of $x^{n^2-k}$ in $\chi_n(x)$ to a placement problem, and then apply Theorem \ref{mainresult}. Our main tool for doing this will be Whitney's broken-circuit theorem, stated below.
\begin{definition}
In a graph $G = (V,E)$ with a total ordering on the edges, a \textit{broken circuit} is a subset of $E$ formed by taking a cycle in $G$ and removing the largest edge (with respect to the ordering).
\end{definition}
\begin{theorem}
\textbf{(Whitney's broken-circuit theorem)} \label{whitthm}
Let $G$ be a finite graph with a strict ordering on the edge set $E$. Then, for $k$ between $0$ and $|V|$ inclusive, the coefficient of $\lambda^{|V| - k}$ in $\chi_{G}(\lambda)$ is equal to $(-1)^{k}$ times the number of $k$-element subsets of $E$ which do not contain any broken-circuit of $G$ as a subset.
\end{theorem}
\begin{proof}
See \cite{Wh32}.
\end{proof}
It would be ideal if we could choose as our set $S$ of figures the set of connected $d$-dimensional figures which do not contain any broken-circuits. Unfortunately, the definition of broken-circuit depends on the ordering of the edges in the graph. Fortunately, we can choose an ordering of edges on $T^{d}_{n}$ that largely remedies this problem.
\begin{definition}
In the graph $T^{d}_{n}$, we call an ordering of the edge set $E$ \textit{natural} if it satisfies the following properties:
\begin{enumerate}
\item
Each edge parallel to $u_i$ (where $u_i$ is the unit vector in dimension $i$) for $i\geq 2$ occurs before all edges parallel to $u_1$ (call these edges horizontal).
\item
If a horizontal edge $e$ connects the points $(x_1, x_2, \dots, x_d)$ and $(x_1+1, x_2, \dots, x_d)$, let the \textit{projection} of edge $e$, $p(e)$, be the $(d-1)$-tuple $(x_2, x_3, \dots, x_d)$. To compare two horizontal edges $e_1$ and $e_2$, let the larger edge be the edge with the lexicographically later projection vector.
\end{enumerate}
A natural ordering of the edge set of the graph $T^{d}_{\infty}$ is defined in the same way.
\end{definition}
\begin{definition}
We say a figure is \textit{locally good} if its embedding in $T^{d}_{\infty}$ contains no broken-circuit under the natural edge ordering (note that, by construction of the natural edge ordering, a translate of a subset of $T^{d}_{\infty}$ contains a broken-circuit iff the subset itself contains a broken-circuit, so this notion is well-defined). We say that the placement of a figure in $T^{d}_{n}$ is \textit{globally good} if the corresponding subset of $T^{d}_n$ contains no broken-circuit under the natural edge ordering. If a figure is not locally/globally good, then it is locally/globally \textit{bad}.
\end{definition}
Now, we can let $S$ be the set of all locally good figures. However, note that it is possible to place a figure that is locally good on $T^{d}_{n}$ such that it is globally bad (for example, for $d = 2$, we can achieve this in certain cases by placing it so that it intersects the vertical line $x_2 = n$). Similarly, it is possible to place a figure which is locally bad on $T^{d}_{n}$ so that it is globally good. The following theorem will allow us to ignore such cases.
\begin{theorem} \label{convthm}
The total number of ways to place a globally bad cycle-free figure with $k$ edges on $T^{d}_{n}$ (over all possible figures with $k$ edges) is equal to the number of ways to place a locally bad cycle-free figure with $k$ edges on $T^{d}_{n}$.
\end{theorem}
\begin{proof}
We will exhibit a bijection between these two sets. Assume we have a figure $f$ (with $|E| = k$) which is cycle-free but globally bad. Since it is globally bad, it must contain some number of broken-circuits (under the natural edge ordering for $T^{d}_{n}$). Let the number of broken-circuits be $b$, and let $e_i$ be the edge needed to make the $i$th broken circuit a cycle. Note first that we cannot have $e_i = e_j$ for $i \neq j$, because if this were the case, then there would be two distinct paths between the endpoints of $e_i$ in $f$, which would imply that there is a cycle in $f$. Thus the $e_i$ comprise $b$ different edges.
Let $\bar{f}$ be the graph formed by adding all of these edges to $f$ (so $\bar{f}$ now has $k+b$ edges). Now, consider $\bar{f}$ as a subgraph of $T^{d}_{\infty}$ with its natural edge ordering. Let $f'$ be the minimum spanning tree of $\bar{f}$, where we let the weight of the $r$th largest edge of $\bar{f}$ be $r$. We now claim that $f'$ is locally bad (it is cycle-free since it is a tree). To see this, note first that since $f'$ and $f$ are both spanning trees of $\bar{f}$, they both must have the same number $k$ of edges. Next, let $e'_1, e'_2, \dots, e'_b$ be the $b$ edges belonging to $\bar{f}$ but not to $f'$. Note that (by the properties of minimum spanning trees) if we add in $e'_i$ for any $i$, we will construct a unique simple cycle; moreover (again by the properties of minimum spanning trees), $e'_i$ will have the heaviest weight in this cycle. This implies that this set of edges in $f'$ (minus $e'_i$) forms a broken circuit under the local natural edge ordering, so $f'$ is locally bad (and in fact, it contains $b$ broken-circuits under this edge ordering).
This procedure is a map which sends placements of globally bad cycle-free figures $f$ with $k$ edges to placements of locally bad cycle-free figures $f'$ with $k$ edges. Now, note that we can invert this map via the following procedure, thus showing that this map is a bijection. As before, we take the $b$ broken-circuits and the $b$ edges $e'_i$ required to make the $i$th broken circuit a cycle. We then add these $b$ edges to $f'$ to construct $\bar{f}$, and once we do this we let $f$ be the minimum spanning tree of $\bar{f}$ with respect to the natural edge ordering of $T^{d}_{n}$. To see that this restores the original $f$, note first that the figure $\bar{f}$ constructed in going from $f$ to $f'$ contains exactly the same edges as the figure $\bar{f}$ constructed in going back from $f'$ to $f$. Next, note that none of the edges $e_i$ can belong to the minimum spanning tree of $\bar{f}$ with respect to the natural edge ordering of $T^{d}_n$; this is since each such edge $e_i$ is the largest edge in a cycle, and such edges never occur in minimum spanning trees. But since $\bar{f}$ has $k+b$ edges, and there are $b$ edges $e_i$, this must mean that this minimum spanning tree is exactly $f$, as desired.
\end{proof}
\begin{corollary} \label{lgcor}
The total number of ways to place a locally good cycle-free figure with $k$ edges on $T^{d}_{n}$ (over all possible figures with $k$ edges) is equal to the number of ways to place a globally good cycle-free figure with $k$ edges on $T^{d}_{n}$.
\end{corollary}
\begin{proof}
Consider the following four sets of possible placements of figures with $k$ edges: $S_{GG}$, the set of locally good and globally good placements, $S_{GB}$, the set of locally good but globally bad placements, $S_{BG}$, the set of locally bad but globally good placements, and $S_{BB}$, the set of locally bad and globally bad placements. We wish to show that $|S_{GG}| + |S_{GB}| = |S_{GG}| + |S_{BG}|$, or equivalently, that $|S_{GB}| = |S_{BG}|$.
To do this, it suffices to show that $|S_{GB}| + |S_{BB}| = |S_{BG}| + |S_{BB}|$. Let $C$ be the set of placements of a figure with $k$ edges that has a cycle; note that any such placement must be both locally bad and globally bad, since any graph with a cycle contains a broken-circuit under any edge-ordering. We thus have that $C \subset S_{BB}$. Because of this, Theorem \ref{convthm} implies that $|S_{GB}| + |S_{BB}| - |C| = |S_{BG}| + |S_{BB}| - |C|$, and therefore that $|S_{GB}| + |S_{BB}| = |S_{BG}| + |S_{BB}|$, as desired.
\end{proof}
We can now prove the following generalization of Theorem \ref{chrommain}.
\begin{theorem}
Fix $d$, and let $\chi_n(x)$ be the chromatic polynomial of $T^{d}_n$. Let $q_k(n^d)$ be the coefficient of $x^{n^d-k}$ in $\chi_n(x)$. Then $(-1)^kq_k(n^d)$ is eventually a polynomial in $n^d$, and this sequence of polynomials is of binomial-type.
\end{theorem}
\begin{proof}
By Whitney's broken-circuit theorem, $q_k(n^d)$ is equal to $(-1)^{k}$ times the number of $k$-element subsets of the edge set of $T^{d}_n$ which contain no broken-circuit. By choosing a natural edge-ordering for $T^{d}_n$ and using the notation above, $(-1)^{k}q_k(n^d)$ is just the number of ways to place a globally good figure with $k$ edges on $T^{d}_n$. By Corollary \ref{lgcor}, this is equal to the number of ways to place a locally good figure with $k$ edges on $T^{d}_n$. By choosing $S$ to be the set of locally good connected figures, it follows from Theorem \ref{mainresult} that this number is indeed a polynomial in $n^d$ and that these polynomials form a sequence of binomial-type, as desired.
\end{proof}
\section{Acknowledgements}
This research was performed as part of MIT's Undergraduate Research Opportunities Program (UROP) in the summer of 2011. The author would like to thank Prof. Richard Stanley for introducing him to this problem, mentoring him over the course of this project, and helping edit this paper.
|
{
"timestamp": "2012-06-28T02:01:41",
"yymm": "1206",
"arxiv_id": "1206.6174",
"language": "en",
"url": "https://arxiv.org/abs/1206.6174"
}
|
\section{Introduction}
Magnetic fields influence many fluids. Magnetohydrodynamics (MHD) is concerned with the interaction between fluid flow and magnetic fields. The governing equations of nonhomogeneous MHD can be stated as follows~\cite{Davidson}:
\begin{equation}\label{MHD} \left\{ \begin{aligned} & \rho_t + \div (\rho u) = 0,\ \ \ \mbox{in}\ \Omega\times [0, T),\\
& (\rho u)_t + \div (\rho u\otimes u) - \div(2\mu(\rho) d) - (B\cdot \nabla )B + \nabla P = 0,\ \ \mbox{in}\ \Omega\times [0, T),\\
&B_t -\lambda \Delta B - {\rm curl} (u \times B) =0,\ \ \ \ \ \mbox{in}\ \Omega\times [0, T),\\
& {\rm div} u =0, \ \ \ \ \div B=0, \ \ \ \ \mbox{in}\ \Omega\times [0, T).\end{aligned}
\right.
\end{equation}
Here $\rho$ and $u$ are the density and the velocity field of the fluid, respectively. $P$ is the pressure and $B$ is the magnetic field. $\mu(\rho)\geq 0$ denotes the viscosity of the fluid, which we assume in this paper to be a positive constant. $\lambda>0$ is also a constant, which describes the relative strengths of advection and diffusion of $B$. For simplicity of writing, we set $\mu = \lambda=1$. Here $d= \frac12 \left(\nabla u + (\nabla u)^t\right)$ is the deformation tensor.
In this paper, we focus on the system (\ref{MHD})
with the initial-boundary conditions
\begin{equation}\label{boundary-condition} u = 0,\ \ \ \ B\cdot\vec{n} = 0, \ \ \ {\rm curl}B = 0\ \ \ \mbox{on}\ \partial \Omega\times [0, T),
\end{equation}
\begin{equation}\label{initial-condition} (\rho, u, B)|_{t=0} = (\rho_0, u_0, B_0)\ \ \mbox{in}\ \Omega.
\end{equation}
Here $\Omega$ is a bounded smooth domain in $\mathbb{R}^2$.
If there is no magnetic field, i.e., $B=0$, the MHD system reduces to the nonhomogeneous Navier-Stokes system. In fact, due to the similarity of the second and third equations in \eqref{MHD}, the study of the MHD system has developed along
with that of the Navier-Stokes system. Let us recall some known results for the 3D nonhomogeneous Navier-Stokes equations. When the initial density $\rho_0$ is bounded away from 0, the global existence of weak solutions was established by Kazhikov~\cite{Kazhikov}; see also \cite{AK}. Moreover, Antontsev-Kazhikov-Monakhov~\cite{AK2} gave the first result on the local existence and uniqueness of strong solutions. For the two-dimensional case, they even proved that the strong solution is global. But the global existence of strong or smooth solutions in 3D is still an open problem. For more results in this direction, see \cite{LS, Salvi, gui-zhang} and references therein.
If the initial density $\rho_0$ allows vacuum, the problem becomes more complicated. Simon\cite{Simon} proved the global existence of weak solutions, see also \cite{Lions}. Choe-Kim\cite{Choe-Kim} constructed a local strong solution under some compatibility conditions on the initial data. More precisely,
they proved that if $(\rho_0$, $u_0)$ satisfy
\begin{equation}\label{initial-data-NS}
0\leq \rho_0 \in L^{\frac32}(\Omega)\cap H^2(\Omega),\ \ \ u_0\in D_{0}^1(\Omega)\cap D^2(\Omega),
\end{equation}
and the compatibility conditions
\begin{equation} \label{compatibility-ns}
{\rm div}u_0=0,\ \ \ \ -\mu \Delta u_0 + \nabla P_0 = \rho_0^{\frac12} g,\ \ \ \mbox{in}\ \Omega,
\end{equation}
with some $(P_0, g)$ belonging to $D^1(\Omega) \times L^2(\Omega)$, then there exists a positive time $T$ and a unique strong solution $(\rho, u)$ $\in C([0, T); H^2(\Omega)) \times C ([0, T); D_0^1(\Omega) \cap D^2(\Omega))$ to the nonhomogeneous Navier-Stokes equations, where $D_0^1(\Omega)$ and $D^2(\Omega)$ denote the usual homogeneous Sobolev spaces. Recall that $D_0^{1}(\mathbb{R}^3)=
\{u\in L^6(\mathbb{R}^3):\nabla u\in L^2(\mathbb{R}^3)\}$ and
$D_0^1(\Omega)=H_0^1(\Omega)$ if $\Omega\subset\subset\mathbb{R}^3$.
Once the local existence of strong solutions is established, a natural question is whether the solution blows up in finite time. Assuming that the finite blow-up time $T^*$ exists, \cite{Kim} proved the Serrin type
criterion, which says that
\begin{equation} \label{serrin-criterion}
\int_0^{T^*} \|u(t)\|_{L^r_w}^s dt = \infty,\ \ \ \mbox{for any}\ (r,s) \ \mbox{with}\ \frac{2}{s} + \frac{n}{r}= 1, \ \ n<r\leq \infty,
\end{equation}
where $n$ is the dimension of the domain and $L_w^r$ is the weak $L^r$ space. (The proof was given in \cite{Kim} only for 3D case, but almost the same proof works for 2D case.) In particular, for the 2D case, it follows from the energy
inequality that $\sup_{0< T< T^*}(\|\sqrt{\rho} u\|_{L^\infty(0, T; L^2)} + \|\nabla u\|_{L^2(0, T; L^2)} )$ is bounded, which implies that $u\in L^4(0, T^*; L^4)$ if $\rho$ is bounded away from 0. Hence the criterion (\ref{serrin-criterion}) in fact implies global existence of strong solution provided that $\rho_0$ is bounded away from 0. However, if the density is allowed to vanish, whether the strong solution exists globally remains unknown. This is the main problem we shall address in this paper.
Let's go back to the MHD system (\ref{MHD}). As said before, the research for MHD goes along with that for Navier-Stokes equations. The results are similar. When $\rho$ is a constant, which means the fluid is homogeneous, the MHD system has been extensively studied. Duvaut-Lions\cite{Duraut-Lions} constructed a class of weak solutions with finite energy and a class of local strong solutions. In particular, the 2D local strong solution has been proved to be global and unique. While for the three-dimensional case, different Serrin type criteria similar to \eqref{serrin-criterion} were given in \cite{He-Xin, He-Wang, Cao-Wu, Zhou-Gala}. As for the 3D Navier-Stokes equations, whether the local strong solution is global is still open.
When the fluid is nonhomogeneous, Gerbeau-Le Bris\cite{GLe}, Desjardins-Le Bris\cite{DLe} studied the global existence of weak solutions of finite energy in the whole space or in the torus. Global existence of strong solutions with small initial data in some Besov spaces was considered by Abidi-Paicu\cite{Abidi-Paicu}. Moreover, \cite{Abidi-Paicu} allowed variable viscosity and conductivity coefficients but required an essential assumption that there is no vacuum (more precisely, the initial data are close to a constant state). Chen-Tan-Wang\cite{Chen-Tan-Wang} extended the local existence result to the case where vacuum is allowed. In conclusion, if the initial data satisfy that
\begin{equation} \label{initial-conditions}
0\leq \rho_0 \in H^2, \ \ \ \ \ (u_0, B_0)\in H^2,
\end{equation}
and the compatibility conditions
\begin{equation} \label{compatibility-conditions}
\begin{aligned}
&u_0= 0, \ \ \ B_0\cdot \vec{n}=0,\ \ \ {\rm curl}B_0 = 0,\ \ \ \mbox{on}\ \partial \Omega,\\
&{\rm div}u_0 = {\rm div}B_0 = 0,\ \ \ \ -\Delta u_0 + \nabla P_0 - (B_0\cdot \nabla )B_0= \rho_0^{\frac12}g,\ \ \ \ \mbox{in}\ \Omega,
\end{aligned}\end{equation}
with some $(P_0, g)\in H^1\times L^2$, then there exist a positive time $T$ and a unique strong solution $(\rho, u, B)$ to the problem \eqref{MHD}-\eqref{initial-condition}, such that
\begin{equation}\label{strong-solution} \begin{aligned} &\rho \in C([0, T]; H^2), \ \ \ (u, B) \in C([0, T]; H^2),\\
& \ p\in C([0, T]; H^1)\cap L^2(0, T; H^2),\ \ \ (u_t, B_t) \in L^2(0, T; H^1), \\
& \mbox{and}\ \ (\rho_t, \sqrt{\rho}u_t, B_t) \in L^\infty(0, T; L^2).
\end{aligned}
\end{equation}
For all the techniques, refer to \cite{Cho-Kim}.
It comes to the question whether the local strong solution blows up. After the proof of \cite{Kim} for nonhomogeneous Navier-Stokes equations, one can get the same criterion \eqref{serrin-criterion} for nonhomogeneous MHD, see also \cite{Zhou-Fan}. In particular, for the 2D case, it says that $\|u\|_{L^2_t L^\infty_x}$ becomes unbounded once the local strong solution blows up. On the other hand, the energy inequality tells us $\|\nabla u\|_{L^2_t L^2_x}$ is uniformly bounded, which only implies that $\|u\|_{L^2_t ( BMO_x)}$ is uniformly bounded. Therefore, in view of the blowup criterion (\ref{serrin-criterion}), it's not enough to extend the local strong solution to a global one. To improve the regularity of the velocity, we choose to apply a critical Sobolev inequality of logarithmic type, which is originally due to Brezis-Gallouet\cite{Brezis-Gallouet} and Brezis-Wainger\cite{Brezis-Wainger}. In this paper, we use some extension, which was proved by Ozawa\cite{Ozawa}. For a new proof, see \cite{Kozono-Ogawa-Taniuchi}. The inequality is stated as follows.
\begin{lemma}
Assume $f\in H^1(\mathbb{R}^2) \cap W^{1, q}(\mathbb{R}^2)$, with some $q> 2$. Then it holds that
\begin{equation}\label{critical-inequality}
\|f\|_{L^\infty(\mathbb{R}^2)} \leq C\left( 1 + \|\nabla f\|_{L^2(\mathbb{R}^2)} \left( \ln^+ \|f\|_{W^{1, q}(\mathbb{R}^2)} \right)^{\frac12} \right),
\end{equation}
with some constant $C$ depending only on $q$.
\end{lemma}
The same proof with some proper extension theorem (see \cite{Adam}), in fact gives the following modified inequality, which involves the integral with respect to time. For
completeness, we will give the proof in Section 2.
\begin{lemma} \label{critical-inequality-lemma} Assume $\Omega$ is a bounded smooth domain in $\mathbb{R}^2$ and $f\in L^2(s, t; H^1(\Omega))\cap L^2(s, t; W^{1, q}(\Omega))$, with some $q>2$ and $0\leq s <t \leq \infty$. Then it holds that
\begin{equation}\label{critical-inequality-time}
\|f\|_{L^2(s, t; L^\infty(\Omega))} \leq C\left(1 + \|f\|_{L^2(s, t;H^1(\Omega)) } \left(\ln^+ \|f\|_{L^2(s, t; W^{1, q}(\Omega))} \right)^{\frac12}\right),
\end{equation}
with some constant $C$ depending only on $q$ and $\Omega$, and independent of $s, t$.
\end{lemma}
The application of \eqref{critical-inequality-time} is the key idea of this paper. Due to this, we can close the estimates for $\|(u, B)\|_{L^\infty_t H^1_x}$. The higher order estimates are in the same spirit as \cite{Kim}. For more details, see Section 3. Finally, we get the result about global existence of strong solution.
\begin{theorem}\label{main-result} Assume that the initial data $(\rho_0, u_0, B_0)$ satisfies \eqref{initial-conditions} and the compatibility conditions \eqref{compatibility-conditions}. Then there exists a global strong solution
$(\rho, u, B)$ of the MHD system \eqref{MHD}-\eqref{initial-condition}, with
\begin{equation}\label{global-solution}
\begin{aligned}
& \rho \in C([0, \infty); H^2), \ \ \ (u, B) \in C([0, \infty); H^2),\\ & P\in C([0, \infty) ; H^1)\cap L^2_{loc}(0, \infty; H^2), \ \ \
(u_t, B_t) \in L^2_{loc}(0, \infty; H^1), \\
& \mbox{and}\ \ (\rho_t, \sqrt{\rho}u_t, B_t) \in L^\infty_{loc}(0, \infty ; L^2).
\end{aligned}
\end{equation}
\end{theorem}
Some remarks are given about this theorem.
\begin{remark}
The local existence of unique strong solution with vacuum to the system (\ref{MHD}) in a two-dimensional bounded domain can be established in the same manner as \cite{Choe-Kim} and \cite{Chen-Tan-Wang}. Throughout this paper, we will concentrate on establishing global estimates for the density, velocity and magnetic field.
\end{remark}
\begin{remark}
If we consider the most special case, where $\rho$ is a constant(the fluid is homogeneous) and $B=0$(no magnetic field), then the system \eqref{MHD} becomes the classical Navier-Stokes system. The global
existence of strong solution has been proved by Leray\cite{Leray}. More generally, if we consider the case that only $\rho$ is a constant, the system \eqref{MHD} becomes the classical homogeneous MHD system. As said before, the corresponding result has been derived by Duvaut-Lions\cite{Duraut-Lions}.
\end{remark}
If $B=0$, Theorem \ref{main-result} in fact gives a positive answer to the global existence of strong solutions with vacuum of the 2D nonhomogeneous Navier-Stokes system. It covers the corresponding result in \cite{AK2}, where the density is strictly positive.
\begin{cor}
Assume that the initial data $(\rho_0, u_0)$ satisfies \eqref{initial-conditions} and the compatibility conditions \eqref{compatibility-ns}. Then there exists a global strong solution
$(\rho, u)$ of the Navier-Stokes equations, with
\begin{equation}\label{global-solution-ns}
\begin{aligned}
& \rho \in C([0, \infty); H^2), \ \ \ u \in C([0, \infty); H^2),\\ & P\in C([0, \infty) ; H^1)\cap L^2_{loc}(0, \infty; H^2),\ \ \
u_t \in L^2_{loc}(0, \infty; H^1), \\ &\mbox{and}\ \ (\rho_t, \sqrt{\rho}u_t) \in L^\infty_{loc}(0, \infty ; L^2).
\end{aligned}
\end{equation}
\end{cor}
We conclude this section with some notations and lemmas.
$L^r(\Omega), W^{k, r}(\Omega)$, $(1\leq r\leq \infty)$, are the standard Sobolev spaces, and we use $L^r=L^r(\Omega)$, $W^{k, r}= W^{k,r}(\Omega)$.
Especially, when $r=2$, denote $H^k = W^{k, 2}$.
For simplicity, let
$$\int f dx \triangleq \int_{\Omega} f dx. $$
Some more lemmas will be used during the proof of Theorem \ref{main-result}. One follows from the regularity theory for the Stokes equations. For its proof, refer to \cite{Galdi}.
\begin{lemma}\label{Galdi} Assume that $(u, P)\in H_0^1 \times H^1$ is a weak solution of the stationary Stokes equations,
\begin{equation}\label{Stokes}\left\{
\begin{aligned}
& -\Delta u + \nabla P = F,\ \ \ \mbox{in}\ \Omega,\\
&{\rm div}u = 0,\ \ \ \ \ \mbox{in}\ \Omega,\\
&u=0, \ \ \ \ \ \ \mbox{on}\ \partial\Omega,
\end{aligned}\right.
\end{equation}
and $F\in L^q$, $1<q<\infty$. Then it holds that
\begin{equation}
\|u\|_{W^{2, q}} \leq C\|F\|_{L^q} + C\|u\|_{H^1},
\end{equation}
with some constant $C$ depending on $\Omega$ and $q$. Moreover, if $F\in H^1$, then
\begin{equation}
\|u\|_{H^3} \leq C \|F\|_{H^1} + C\|u\|_{H^1},
\end{equation}
with some constant $C$ depending only on $\Omega$.
\end{lemma}
The other lemma is responsible for the estimates for $B$ and follows from the classical regularity theory for elliptic equations. For its proof, refer to \cite{Nirenberg}.
\begin{lemma} \label{Nirenberg} Assume that $B\in H^1$ is a weak solution of the Poisson equations
\begin{equation}\label{Poisson} \left\{ \begin{aligned}
& \Delta B= G,\ \ \ \mbox{in}\ \Omega,\\
& B\cdot \vec{n}= 0, \ \ \ {\rm curl}B= 0, \ \ \ \mbox{on}\ \partial \Omega,
\end{aligned}
\right.
\end{equation}
and $G\in L^q$, $1<q<\infty$. Then it holds that
\begin{equation}
\|B\|_{W^{2, q}} \leq C \|G\|_{L^q} + C \|B\|_{H^1},
\end{equation}
with some constant $C$ depending on $\Omega$ and $q$. Moreover, if $G\in H^1$, then
\begin{equation}
\|B\|_{H^3} \leq C \|G\|_{H^1} + C\|B\|_{H^1},
\end{equation}
with some constant $C$ depending only on $\Omega$.
\end{lemma}
\section{Proof of Lemma 1.2}
This section is dedicated to the proof of Lemma 1.2. First we will prove the inequality \eqref{critical-inequality-time} for the whole space case, which is
\begin{equation}\label{critical-whole-space}
\|f\|_{L^2(s, t; L^\infty(\mathbb{R}^2))} \leq C\left(1 + \|f\|_{L^2(s, t;H^1(\mathbb{R}^2)) } \left(\ln^+ \|f\|_{L^2(s, t; W^{1, q}(\mathbb{R}^2))} \right)^{\frac12}\right).
\end{equation}
The proof follows exactly that in \cite{Kozono-Ogawa-Taniuchi} and relies mainly on the Littlewood-Paley decomposition. So we introduce here some new notations associated with the decomposition.
Define $\mathcal{C}$ to be the ring
$$\mathcal{C}= \left\{ \xi \in \mathbb{R}^2:\ \ \frac34 \leq |\xi| \leq \frac83 \right\},$$
and define $\mathcal{D}$ to be the ball
$$\mathcal{D}= \left\{\xi \in \mathbb{R}^2: \ \ \ |\xi|\leq \frac43 \right\}.$$
Let $\chi$ and $\varphi$ be two smooth nonnegative radial functions supported respectively in $\mathcal{D}$ and $\mathcal{C}$, such that
$$\chi(\xi) + \sum_{q\in \mathbb{N}} \varphi(2^{-q} \xi)= 1\ \ \mbox{for}\ \xi \in \mathbb{R}^2,\ \ \mbox{and}\ \ \sum_{q\in \mathbb{Z}}\varphi(2^{-q}\xi )= 1\ \ \mbox{for}\ \xi \in \mathbb{R}^2
\setminus \{0\}.$$
Denote the Fourier transform on $\mathbb{R}^2$ by $\mathcal{F}$ and denote
$$h= \mathcal{F}^{-1} \varphi,\ \ \ \ \ \ \tilde{h}= \mathcal{F}^{-1} \chi.$$
The frequency localization operator is defined by
$$\Delta_q f = \mathcal{F}^{-1} \left[ \varphi(2^{-q} \xi )\mathcal{F} (f) \right] = 2^{2q} \int_{\mathbb{R}^2} h(2^q y) f (x-y ) dy,$$
and
$$S_q f = \mathcal{F}^{-1} \left[ \chi(2^{-q}\xi ) \mathcal{F}(f) \right]= 2^{2q} \int_{\mathbb{R}^2} \tilde{h}(2^q y) f(x-y) dy. $$
We are now ready to prove \eqref{critical-whole-space}.
\begin{proof}
Decompose $f$ into three parts such as
\begin{equation} \begin{aligned}
f(x, \tau) & = S_{-N-1} f(x, \tau) + \sum_{|j|\leq N} \Delta_{j} f(x, \tau) + \sum_{j>N }\Delta_j f(x, \tau)\\
& = f_1(x, \tau) + f_2(x, \tau) + f_3(x, \tau).
\end{aligned}
\end{equation}
By Bernstein's inequality(see \cite{Chemin}),
\begin{equation}
\|f_1\|_{L^2(s, t; L^\infty)}\leq C2^{-2N/q}\|f\|_{L^2(s, t; L^q)},\ \ \ q\in [1, \infty).
\end{equation}
Similarly,
\begin{equation}\begin{aligned}
\|f_2\|_{L^2(s, t; L^\infty) } & \leq \sum_{|j|\leq N} \|\Delta_j f\|_{L^2(s, t; L^\infty)} \\
& \leq C N^{\frac12} \left( \sum_{|j|\leq N} \| \nabla (\Delta_j f ) \|_{L^2(s, t; L^2)}^2 \right)^{\frac12}\\
& \leq CN^{\frac12} \|\nabla f\|_{L^2(s, t; L^2)},
\end{aligned}\end{equation}
and
\begin{equation} \begin{aligned}
\|f_3\|_{L^2(s, t; L^\infty)} & \leq \sum_{j > N } \|\Delta_j f\|_{L^2(s, t; L^\infty)} \\
& \leq C\sum_{j>N} 2^{2j (1/q - 1/2)} \|\nabla f\|_{L^2(s, t; L^q)}\\
& \leq C 2^{(2/q- 1)N} \|\nabla f\|_{L^2(s, t; L^q)}.
\end{aligned}\end{equation}
If we set $\kappa = \min (2/q, \ 2(1/2-1/q))$, then
\begin{equation}
\|f\|_{L^2(s, t; L^\infty)} \leq C \left\{ 2^{-\kappa N} \|f\|_{L^2(s, t; W^{1,q})} + N^{\frac12} \|\nabla f\|_{L^2(s, t; L^2)} \right\}.
\end{equation}
Choose $N = \left[ \log_{2^\kappa} \frac{ \|f\|_{L^2(s, t; W^{1,q}) }}{\|\nabla f\|_{L^2(s, t; L^2)}}\right]+ 1$, hence
we derive that
\begin{equation}
\|f\|_{L^2(s, t; L^\infty)} \leq C \|\nabla f\|_{L^2(s, t; L^2)} \left( 1 + \left( \ln^+ \frac{ \|f\|_{L^2(s, t; W^{1,q}) }}{\|\nabla f\|_{L^2(s, t; L^2)}} \right)^{1/2} \right),
\end{equation}
which implies \eqref{critical-whole-space}.
\end{proof}
Combining the extension theorem (see \cite{Adam}) and \eqref{critical-whole-space}, we prove Lemma 1.2.
\section{Proof of Theorem 1.3}
This section is dedicated to the proof of Theorem 1.3. Define the quantity $\Phi(T)$ as follows,
\begin{equation}\label{Phi}\begin{aligned}
\Phi(T)& = \sup_{0\leq t\leq T}\left(\| \rho(t)\|_{H^2}^2 + \|u(t)\|_{H^2}^2 + \|B(t)\|_{H^2}^2 \right) + \|\sqrt{\rho} u_t\|_{L^\infty(0, T; L^2)}^2\\
& \ \ \ + \int_0^T \left(\|u(t)\|_{H^3}^2 + \|B(t)\|_{H^3}^2 \right)dt+ \int_0^T\left( \|u_t\|_{H^1}^2 + \|B_t\|_{H^1}^2\right)dt.
\end{aligned}\end{equation}
Suppose the local strong solution blows up at some finite time $T^*<\infty$; we will prove that in fact there exists a generic constant $\bar{M}<\infty$ depending only on the initial data and $T^*$ such that
\begin{equation} \label{final} \sup_{0\leq T < T^*} \Phi(T)\le\bar{M}. \end{equation}
Having (\ref{final}) at hand, it is standard to show that we can extend the strong solution beyond $T^*$, which gives a contradiction. Hence the local strong solution does not blow up in finite time. Also, the uniqueness of strong solutions follows from a standard procedure.
Throughout this section, $C$ denotes a generic constant depending only on the initial data and $T^*$. The proof is divided into five steps, corresponding to estimates at different levels.
Before proceeding, we write another equivalent form of \eqref{MHD} for convenience, which is
\begin{equation}\label{MHD-new}\left\{\begin{aligned} & \rho_t + u\cdot \nabla \rho = 0,\\
&\rho u_t - \Delta u + (\rho u\cdot \nabla )u - (B\cdot \nabla ) B + \nabla P =0, \\
&B_t - \Delta B + (u\cdot \nabla ) B - (B\cdot \nabla )u = 0,\\
&\div u =0, \ \ \ \div B =0.
\end{aligned}
\right. \end{equation}
Now we start the proof of Theorem \ref{main-result}.
\vspace{2mm}{\bf Step I\ \ $L^\infty$ bound for $\rho$.}\ The equation $\eqref{MHD-new}_1$ for density is a transport equation, then for every $0\leq t< T^*$,
\begin{equation} \label{density}
\|\rho(t)\|_{L^\infty} = \|\rho_0\|_{L^\infty}.
\end{equation}
\vspace{2mm}{\bf Step II\ \ Basic energy estimate}
\begin{pro}[Energy inequality] There exists a constant $M$ depending only on $\|\sqrt{\rho_0} u_0\|_{L^2}$ and $\|B_0\|_{L^2}$, such that for every $0< T< T^*$,
\begin{equation}\label{energy-estimate}\|\sqrt{\rho} u \|_{L^\infty(0, T; L^2 )}^2 + \|B\|_{L^\infty(0, T; L^2)}^2 + \int_0^T \|\nabla u\|_{L^2}^2 dt + \int_0^T \|\nabla B\|_{L^2}^2 dt
\leq M.\end{equation}
\end{pro}
\begin{proof} The proof is standard.
Multiplying $\eqref{MHD-new}_2$ and $\eqref{MHD-new}_3$ by $u$ and $B$ respectively, then adding the two resulting equations together, integrating over $\Omega$, one can get that
\begin{equation}
\frac{1}{2}\frac{d}{dt} \int \rho |u|^2dx +\frac12\frac{d}{dt} \int |B|^2 dx + \int |\nabla u|^2 dx + \int |\nabla B|^2 dx =0,
\end{equation}
where integration by parts was applied. It implies that the inequality \eqref{energy-estimate} holds and consequently completes the proof.
\end{proof}
{\bf Step III\ \ Estimates for $\|(\sqrt{\rho}u_t,\ B_t)\|_{L^2(0, T; L^2)}$ and $\|(\nabla u, \nabla B)\|_{L^\infty(0, T; L^2)}.$}
This is a crucial step during the proof. Higher order estimates of the density, velocity and magnetic field can be done in a standard way provided that
$\|(u,\ B)\|_{H^1}$ is uniformly bounded with respect to time. To prove that, we will make use of some extension of critical Sobolev inequality of logarithmic type, as indicated by Lemma \ref{critical-inequality-lemma}.
\begin{pro} \label{First-Level}Under the assumptions in Theorem \ref{main-result}, it holds that
\begin{equation}\label{first-level}
\sup_{0<T<T^*}\left \{\|(u(T),\ B(T))\|_{H^1}^2 + \int_0^T \|(\sqrt{\rho}u_t,\ B_t)\|_{L^2}^2 dt \right\}< \infty.
\end{equation}
\end{pro}
\begin{proof}
Multiplying the equation $\eqref{MHD-new}_2$ by $u_t$ and integrating over $\Omega$ lead to
\begin{equation}\label{first-level-1}
\frac12\frac{d}{dt} \int |\nabla u|^2 dx + \int \rho |u_t|^2 dx =
- \int (\rho u\cdot \nabla u)\cdot u_t dx + \int (B\cdot \nabla) B\cdot u_t dx.
\end{equation}
By H\"older's inequality and Young inequality,
\begin{equation}\label{first-level-2}\begin{aligned}
\left| \int (\rho u \cdot \nabla) u \cdot u_t dx \right|& \leq C\|\sqrt{\rho} u_t\|_{L^2 } \cdot \|u\|_{L^\infty} \cdot \|\nabla u\|_{L^2}\\
& \leq \frac{1}{2} \|\sqrt{\rho}u_t\|_{L^2}^2 + C\|u\|_{L^\infty}^2 \|\nabla u\|_{L^2}^2.
\end{aligned}
\end{equation}
Applying integration by parts with the conditions that ${\rm div} B=0$ in $\Omega$ and $B\cdot \vec{n}= 0$ on $\partial \Omega$, then
\begin{equation} \label{first-level-3}\begin{aligned} &\int (B\cdot \nabla) B\cdot u_t dx \\
=& \frac{d}{dt}\int (B\cdot \nabla ) B\cdot u dx - \int (B_t \cdot \nabla )B \cdot u dx - \int (B\cdot \nabla ) B_t \cdot u dx\\
=& -\frac{d}{dt} \int (B\cdot \nabla ) u \cdot B dx + \int (B_t \cdot \nabla ) u \cdot B dx + \int (B\cdot \nabla ) u \cdot B_t dx\\
\leq & - \frac{d}{dt} \int (B\cdot \nabla ) u \cdot B dx + C\|B\|_{L^\infty}^2\|\nabla u\|_{L^2}^2 + \frac12 \|B_t\|_{L^2}^2.\end{aligned} \end{equation}
Hence, combining \eqref{first-level-1}-\eqref{first-level-3}, we get that
\begin{equation}\label{first-level-4} \begin{aligned}
&\frac12 \|\sqrt{\rho} u _t\|_{L^2}^2 + \frac12 \frac{d}{dt}\int |\nabla u|^2 dx + \frac{d}{dt}\int (B\cdot \nabla ) u \cdot B dx \\
\leq & C\left( \|u\|_{L^\infty}^2 + \|B\|_{L^\infty}^2\right) \|\nabla u\|_{L^2}^2 + \frac12 \|B_t\|_{L^2}^2.
\end{aligned}\end{equation}
Similarly, multiplying the equation $\eqref{MHD-new}_3$ by $B_t$ and integrating over $\Omega$ lead to
\begin{equation}\label{first-level-5}
\begin{aligned} & \frac12 \frac{d}{dt} \int |\nabla B|^2 dx + \int |B_t|^2 dx \\
= & - \int (u \cdot \nabla B) \cdot B_t dx + \int (B\cdot \nabla ) u \cdot B_t dx \\
\leq & \frac12 \|B_t\|_{L^2}^2 + C\|u\|_{L^\infty}^2 \|\nabla B\|_{L^2}^2 + C\|\nabla u\|_{L^2}^2 \|B\|_{L^\infty}^2,
\end{aligned}
\end{equation}
which implies that
\begin{equation} \label{first-level-6}
\frac{d}{dt}\int |\nabla B|^2 dx + \|B_t\|_{L^2}^2 \leq C\|u\|_{L^\infty}^2 \|\nabla B\|_{L^2}^2 + C\|B\|_{L^\infty}^2 \|\nabla u\|_{L^2}^2.
\end{equation}
The sign of the term $\int (B\cdot \nabla ) u \cdot B dx $ on the left-hand side of \eqref{first-level-4} cannot be determined, so we choose some appropriate positive terms to control it. Note that it follows from the Gagliardo-Nirenberg inequality that
\begin{equation} \label{first-level-7} \begin{aligned}
\left|\int (B\cdot \nabla ) u \cdot B dx \right|
\leq & \|B\|_{L^4}^2 \|\nabla u\|_{L^2}\\
\leq & C\|B\|_{L^2} \|B\|_{H^1} \|\nabla u\|_{L^2} \\
\leq & \frac{1}{4}\|\nabla u \|_{L^2}^2 + C_1 \|B\|_{L^2}^2 (\|B\|_{L^2}^2 + \|\nabla B\|_{L^2}^2).
\end{aligned}\end{equation}
Next, we multiply \eqref{first-level-6} by $2C_1 M +2$, where $C_1$ and $M$ are constants appearing in \eqref{first-level-7} and \eqref{energy-estimate}, add it to \eqref{first-level-4} and integrate
with respect to time, then for every $0\leq s <T<T^* $,
\begin{equation}\label{first-level-8} \begin{aligned}
&\int |\nabla u(T) |^2 dx + \int |\nabla B(T)|^2 dx + \int_s^T \|\sqrt{\rho}u_t\|_{L^2}^2 d\tau + \int_s^T \|B_t\|_{L^2}^2 d\tau \\
\leq & C \left[ \int |\nabla u(s) |^2 dx + \int |\nabla B(s)|^2 dx\right] \exp \left\{ C\int_s^T (\|u\|_{L^\infty}^2 + \|B\|_{L^\infty}^2) d\tau \right \} + C.
\end{aligned} \end{equation}
Denote \begin{equation} \label{first-level-9} \Psi (t) = e+\sup_{0\leq \tau \leq t} \left( \|u(\tau)\|_{H^1}^2 + \|B(\tau)\|_{H^1}^2\right) + \int_0^t \left( \|\sqrt{\rho}u_t\|_{L^2}^2 + \|B_t\|_{L^2}^2 \right) d\tau , \end{equation}
then \eqref{first-level-8} and \eqref{energy-estimate} give that for every $0\leq s < T < T^*$,
\begin{equation}\label{first-level-10}
\Psi(T) \leq C \Psi(s) \exp \left\{ C\int_s^T (\|u\|_{L^\infty}^2 + \|B\|_{L^\infty}^2) d\tau \right \}.
\end{equation}
To get a proper estimate for $\|u\|_{L_t^2 L_x^\infty}$ and $\|B\|_{L^2_t L_x^\infty}$, we get help from Lemma \ref{critical-inequality-lemma}.
\begin{equation} \label{first-level-11} \begin{aligned}
& \|u\|_{L^2(s, T; L^\infty) }^2 + \|B\|_{L^2(s, T; L^\infty)}^2 \\ \leq & C \left\{ 1 + (\|u\|_{L^2(s, T; H^1)}^2 + \|B\|_{L^2(s, T; H^1)}^2 ) \left(\ln^+ \|u\|_{L^2(s, T; W^{1,4})} + \ln^+ \|B\|_{L^2(s, T; W^{1,4})}\right)\right\}.
\end{aligned}
\end{equation}
Applying Lemma \ref{Galdi} to the equation $\eqref{MHD-new}_2$ yields
\begin{equation}\label{first-level-12}
\|u\|_{W^{1,4}} \leq C \|u\|_{H^1} + C \|\rho u_t \|_{L^{\frac43}} + C \|(\rho u\cdot \nabla)u - (B\cdot \nabla )B\|_{L^{\frac43}} ,
\end{equation}
which implies
\begin{equation}\label{first-level-13} \begin{aligned}
\|u\|_{L^2(s, T; W^{1,4})} \leq & C \|u\|_{L^2(s, T; H^1)} + C\|\sqrt{\rho} u_t\|_{L^2(s, T; L^2)} \\ & + C \|u\|_{L^2(s, T; H^1)} \|\nabla u\|_{L^\infty(s, T; L^2)}
+ C \|B\|_{L^2(s, T; H^1)} \|\nabla B\|_{L^\infty(s, T; L^2)}. \end{aligned} \end{equation}
Similarly, applying Lemma \ref{Nirenberg} to the equation $\eqref{MHD-new}_3$ to obtain
\begin{equation} \label{first-level-14} \begin{aligned}
\|B\|_{L^2(s, T; W^{1,4})} \leq & C \|B\|_{L^2(s, T; H^1)} + C\|B_t\|_{L^2(s, T; L^2)} \\ & + C \|u\|_{L^2(s, T; H^1)} \|\nabla B\|_{L^\infty(s, T; L^2)}
+ C \|B\|_{L^2(s, T; H^1)} \|\nabla u\|_{L^\infty(s, T; L^2)}.
\end{aligned} \end{equation}
Note that the constant $C$ in \eqref{first-level-13} and \eqref{first-level-14} does not depend on $u$, $B$, $s$ or $T$. It only depends on the domain $\Omega$. Taking the energy inequality
\eqref{energy-estimate} into consideration, then for every $0\leq s<T< T^*$,
\begin{equation}\label{first-level-15}\begin{aligned}
& \|u\|_{L^2(s, T; L^\infty)}^2 + \|B\|_{L^2(s, T; L^\infty)}^2 \\ \leq & C_2 \left\{1 +( \|u\|_{L^2(s, T; H^1)}^2 + \|B\|_{L^2(s, T; H^1)}^2 )\ln \left( C(M, T^*) \Psi(T) \right) \right\},
\end{aligned}
\end{equation}
where $C_2$ is constant which only depends on $\Omega$, and $C(M, T^*)$ is a constant depending on $M$ in \eqref{energy-estimate} and $T^*$.
Substituting \eqref{first-level-15} into \eqref{first-level-10}, it arrives at
\begin{equation}\label{first-level-16}
\Psi(T) \leq C \Psi(s) \left[ C(M, T^*) \Psi(T) \right]^{C_2 \left( \|u\|_{L^2(s, T; H^1)}^2 + \|B\|_{L^2(s, T; H^1)}^2 \right)}.
\end{equation}
Recalling the energy estimate (\ref{energy-estimate}), one can choose $s$ close enough to $T^*$, such that
\begin{equation} \lim_{T\rightarrow T^*} C_2\left( \|u\|_{L^2(s, T; H^1)}^2 + \|B\|_{L^2(s, T; H^1)}^2 \right) \leq \frac12, \end{equation}
then for every $s<T< T^*$, we have
\begin{equation}
\Psi(T) \leq C \Psi(s)^2 \cdot C(M, T^*)^2,
\end{equation}
which completes the proof of Proposition \ref{First-Level}.
\end{proof}
\begin{remark} Unfortunately, we cannot get any explicit bound for $\|(u, B)\|_{H^1}$ in terms of the initial data, due to the technique used here.
\end{remark}
We have some more estimates as corollaries of Proposition \ref{First-Level}.
\begin{pro}\label{first-level-add-1} Assume that \begin{equation}\label{assumption} \sup_{0< T< T^*} \left\{ \|(u(T), B(T))\|_{H^1}^2 + \int_0^T \|(\sqrt{\rho}u_t, B_t)\|_{L^2}^2 dt\right\} \leq C_3.\end{equation}
Then there exists a constant $C_4$ depending on $C_3$, such that
\begin{equation} \label{first-level-11-1}
\sup_{0< T<T^*} \left\{ \|u\|_{L^2(0, T; H^2) }+ \|B\|_{L^2 (0, T; H^2)} \right\} \leq C_4.
\end{equation}
\end{pro}
\begin{proof} The equation $\eqref{MHD-new}_2$, together with Lemma \ref{Galdi}, gives us that
\begin{equation}\label{first-level-12-1}\begin{aligned}
\|u\|_{H^2} & \leq C\|u\|_{H^1} + C\|\rho u_t\|_{L^2} + C\|(\rho u \cdot \nabla ) u\|_{L^2} + C\| (B\cdot \nabla ) B\|_{L^2}\\
& \leq C\|u\|_{H^1}+ C\|\sqrt{\rho}u_t\|_{L^2} + C\|u\|_{L^\infty} \|\nabla u\|_{L^2} + C \|B\|_{L^\infty} \|\nabla B\|_{L^2}.
\end{aligned}\end{equation}
Similarly, by Lemma \ref{Nirenberg},
\begin{equation} \label{first-level-13-1}\begin{aligned}
\|B\|_{H^2} \leq C\|B\|_{H^1} + C\|B_t\|_{L^2} + C\|u\|_{L^\infty}\|\nabla B\|_{L^2} + C\|B\|_{L^\infty} \|\nabla u\|_{L^2}. \end{aligned}
\end{equation}
Combining the two inequalities \eqref{first-level-12-1} and \eqref{first-level-13-1}, we have
\begin{equation}\label{first-level-14-1}
\begin{aligned}
& \|u\|_{H^2} + \|B\|_{H^2} \\ \leq & C \|\sqrt{\rho} u_t\|_{L^2} + C\|B_t\|_{L^2} + C \left(\|u\|_{L^\infty} + \|B\|_{L^\infty} + 1 \right)\cdot \left(\| u\|_{H^1} + \| B\|_{H^1}\right)\\
\leq & C\left(\|u\|_{H^2} + \|B\|_{H^2}\right)^{1/2} \left(\|u\|_{L^2}+ \|B\|_{L^2}\right)^{1/2} \cdot \left(\| u\|_{H^1} + \|B\|_{H^1}\right)\\
&\ + C \left( \| u\|_{H^1} + \|B\|_{H^1} \right)+ C \|\sqrt{\rho} u_t\|_{L^2} + C\|B_t\|_{L^2}.
\end{aligned}\end{equation}
Here the Gagliardo-Nirenberg inequality was used. Hence,
\begin{equation}\label{first-level-15-1}
\|u\|_{H^2}+ \|B\|_{H^2} \leq C\|\sqrt{\rho} u_t\|_{L^2} + C\|B_t\|_{L^2} + C \left(1+ \|u\|_{H^1} + \|B\|_{H^1}\right)^3,
\end{equation}
which completes the proof for \eqref{first-level-11-1}.
\end{proof}
\begin{pro}\label{first-level-add-2}Assume \eqref{assumption} holds, then there exists some constant $C_5$ depending on $C_3$ such that
\begin{equation} \label{first-level-16-1} \sup_{0<T < T^*} \left\{ \|u\|_{L^4(0, T; L^\infty)} + \|B\|_{L^4(0, T; L^\infty)} \right\} \leq C_5.
\end{equation}
\end{pro}
\begin{proof}
By Gagliardo-Nirenberg inequality,
\begin{equation}
\|u\|_{L^\infty} \leq C\|u\|_{L^2}^{1/2} \cdot \|u \|_{H^2}^{1/2},
\end{equation}
and
\begin{equation}
\|B\|_{L^\infty} \leq C \|B\|_{L^2}^{1/2} \cdot \|B\|_{H^2}^{1/2},
\end{equation}
which together with \eqref{first-level-11-1} completes the proof for \eqref{first-level-16-1}.
\end{proof}
\vspace{2mm} {\bf Step IV\ \ Estimates for $\|(\sqrt{\rho}u_t, \ B_t)\|_{L^\infty(0, T; L^2)}$ and $\|(\nabla u_t, \nabla B_t)\|_{L^2(0, T; L^2)}$} From now on, the estimates are standard, following the
proof in \cite{Kim}. We write them down here for completeness.
\begin{pro}Under the assumptions in Theorem \ref{main-result}, it holds that
\label{Second-Level}
\begin{equation}\label{second-level}
\sup_{0<T<T^*}\left \{\|(\sqrt{\rho} u_t(T), \ B_t(T))\|_{L^2} + \int_0^T \|(\nabla u_t, \ \nabla B_t)\|_{L^2}^2 dt \right\}< \infty.
\end{equation}
\end{pro}
\begin{proof} Taking $t$-derivative of the equation $\eqref{MHD-new}_2$, then one gets that
\begin{equation}\label{second-level-1}\begin{aligned}
&\rho u_{tt} + (\rho u\cdot \nabla ) u_t - \Delta u_t + \nabla P_t \\ & = -\rho_t u_t - (\rho_t u \cdot \nabla ) u - (\rho u_t \cdot \nabla ) u + (B_t \cdot \nabla ) B + (B\cdot \nabla ) B_t.
\end{aligned} \end{equation}
Multiplying \eqref{second-level-1} by $u_t$ and integrating over $\Omega$,
\begin{equation}\label{second-level-2}\begin{aligned}
& \frac12 \frac{d}{dt} \int \rho |u_t|^2 dx + \int |\nabla u_t|^2 dx
= - \int \rho_t |u_t|^2 dx - \int (\rho_t u \cdot \nabla )u \cdot u_t dx \\ & \ \ \ \ - \int (\rho u_t \cdot \nabla )u \cdot u_t dx + \int(B_t \cdot \nabla )B\cdot u_t dx + \int (B\cdot \nabla) B_t \cdot u_t dx.
\end{aligned}
\end{equation}
We estimate the terms on the right-hand side one by one. Taking $\eqref{MHD}_1$ into consideration, we get that
\begin{equation}\label{second-level-3}\begin{aligned}
-\int \rho_t |u_t|^2 dx & = \int {\rm div}(\rho u) |u_t|^2 dx \\
& = -\int 2\rho u \cdot \nabla u_t \cdot u_t dx \\
& \leq \frac18 \|\nabla u_t \|_{L^2}^2 + C\|\sqrt{\rho}u_t\|_{L^2}^2 \|u\|_{L^\infty}^2,
\end{aligned} \end{equation}
and also for the second term,
\begin{equation} \label{second-level-4}\begin{aligned}
& -\int (\rho_t u\cdot \nabla ) u \cdot u_t dx\\
=& -\int \rho u \cdot \nabla [(u\cdot \nabla) u\cdot u_t]dx\\
\leq & \int |\rho u_t| |u| |\nabla u|^2 dx + \int |\rho u_t | |u|^2 |\nabla ^2 u| dx + \int \rho |u|^2 |\nabla u| |\nabla u_t| dx\\
\end{aligned}\end{equation}
Here by Gagliardo-Nirenberg inequality,
\begin{equation}\label{second-level-5}\begin{aligned}
& \int |\rho u_t| |u| |\nabla u|^2 dx \\
& \leq \|\sqrt{\rho }u_t\|_{L^2}\|u\|_{L^\infty} \|\nabla u\|_{L^4}^2 \\
& \leq C\|\sqrt{\rho}u_t \|_{L^2}\|u\|_{L^\infty} \|\nabla u\|_{L^2} \|\nabla u\|_{H^1} \\
& \leq \|u\|_{L^\infty}^2 \|\sqrt{\rho}u_t\|_{L^2}^2 + C\|\nabla u\|_{L^2}^2 \| u\|_{H^2}^2.
\end{aligned} \end{equation}
By Young inequality,
\begin{equation}\label{second-level-6}\begin{aligned}
&\int |\rho u_t | |u|^2 |\nabla ^2 u| dx \\
& \leq C\|\sqrt{\rho} u_t\|_{L^2} \|u\|_{L^\infty}^2 \|\nabla^2 u\|_{L^2} \\
& \leq \|u\|_{L^\infty}^4 \|\sqrt{\rho} u_t\|_{L^2}^2 + C\| u\|_{H^2}^2. \\
\end{aligned}\end{equation}
And similarly,
\begin{equation}\label{second-level-7}\begin{aligned}
& \int \rho |u|^2 |\nabla u| |\nabla u_t| dx\\
\leq & C\|u\|_{L^\infty}^2 \|\nabla u\|_{L^2}\|\nabla u_t\|_{L^2}\\
\leq & \frac18 \|\nabla u_t\|_{L^2}^2 + C\|u\|_{L^\infty}^4 \|\nabla u\|_{L^2}^2.
\end{aligned} \end{equation}
For the third term on the right-hand side of \eqref{second-level-2}, by the Poincar\'{e} inequality and the Gagliardo-Nirenberg inequality,
\begin{equation}\label{second-level-8} \begin{aligned} & -\int (\rho u_t \cdot \nabla ) u \cdot u_t dx \\
\leq &C\|\sqrt{\rho} u_t \|_{L^2} \|\nabla u\|_{L^4} \|u_t\|_{L^4} \\
\leq & C \|u\|_{H^2}^2 \|\sqrt{\rho}u_t\|_{L^2}^2 + \frac18 \|\nabla u_t\|_{L^2}^2.
\end{aligned} \end{equation}
Since ${\rm div}B_t =0 $ in $\Omega$ and $B_t \cdot \vec{n}= 0$ on $\partial \Omega$, then
\begin{equation} \label{second-level-9} \begin{aligned} & \int (B_t \cdot \nabla ) B \cdot u_t dx\\
= & -\int (B_t \cdot \nabla) u_t \cdot B dx\\
\leq & \frac18 \|\nabla u_t \|_{L^2 }^2 + C \|B\|_{L^\infty}^2 \|B_t\|_{L^2}^2.
\end{aligned} \end{equation}
And similarly,
\begin{equation} \label{second-level-10} \begin{aligned} & \int (B\cdot \nabla ) B_t \cdot u_t dx \\
\leq & \frac18 \|\nabla u_t\|_{L^2}^2+ C\|B\|_{L^\infty}^2 \|B_t\|_{L^2}^2.
\end{aligned} \end{equation}
Now we turn to the equation for $B$. Taking $t$-derivative of $\eqref{MHD-new}_3$, multiplying by $B_t$ and integrating over $\Omega$, then
\begin{equation} \label{second-level-12} \begin{aligned} & \frac12 \frac{d}{dt} \int |B_t|^2dx + \int |\nabla B_t|^2 dx \\
= & -\int (u_t \cdot \nabla ) B\cdot B_t dx + \int (B_t \cdot \nabla) u\cdot B_t dx + \int (B\cdot \nabla ) u_t \cdot B_t dx.
\end{aligned} \end{equation}
Here Poincar\'{e} inequality gives that
\begin{equation} \label{second-level-13}\begin{aligned} &-\int (u_t \cdot \nabla ) B\cdot B_t dx \\
\leq & \|u_t\|_{L^4}\|\nabla B\|_{L^4} \|B_t\|_{L^2}\\
\leq & \frac18 \|\nabla u_t\|_{L^2}^2 + C \|\nabla B\|_{H^1}^2\|B_t\|_{L^2}^2 .
\end{aligned}\end{equation}
Gagliardo-Nirenberg inequality gives that
\begin{equation} \label{second-level-14}\begin{aligned} &\int (B_t \cdot \nabla ) u\cdot B_t dx\\
\leq & \|B_t\|_{L^4}^2 \|\nabla u\|_{L^2}\\
\leq & \frac18 \| B_t\|_{H^1}^2 + C\|\nabla u\|_{L^2}^2 \|B_t\|_{L^2}^2.
\end{aligned} \end{equation}
And H\"older's inequality gives that
\begin{equation}\label{second-level-15}\begin{aligned} & \int (B\cdot \nabla ) u_t \cdot B_t dx\\
\leq & \frac18\|\nabla u_t\|_{L^2}^2 + C \|B\|_{L^\infty}^2 \|B_t\|_{L^2}^2.
\end{aligned}\end{equation}
Collecting all the estimates \eqref{second-level-2}-\eqref{second-level-15} and taking Propositions \ref{First-Level}, \ref{first-level-add-1}, \ref{first-level-add-2} into account, we get that
\begin{equation} \label{second-level-16} \begin{aligned} & \frac12 \frac{d}{dt} \int |\sqrt{\rho}u_t |^2 dx + \frac12 \frac{d}{dt} \int |B_t|^2 dx + \frac14 \int |\nabla u_t|^2 dx + \frac14\int |\nabla B_t|^2 dx
\\ \leq & C(1+ \|u\|_{L^\infty}^4 + \|B\|_{L^\infty}^2 + \|u\|_{H^2}^2 + \|B\|_{H^2}^2) (\|\sqrt{\rho} u_t\|_{L^2}^2 + \|B_t\|_{L^2}^2 )\\
& + C \|\nabla u\|_{L^2}^2 \|u\|_{H^2}^2 + C \|u\|_{L^\infty}^4 \|\nabla u\|_{L^2}^2 ,
\end{aligned}\end{equation}
which together with Gronwall's inequality completes the proof of Proposition \ref{Second-Level}.
\end{proof}
As a corollary, we can bound $\|u\|_{L^2_t W^{2,4}_x}$, which will play an important role in the estimates for $\rho$.
\begin{pro}\label{second-level-corollary-1} Under the assumptions of Theorem \ref{main-result}, it holds that \begin{equation} \label{second-level-corollary}
\sup_{0< T< T^*} \left\{ \|u\|_{L^2(0, T; W^{2,4}) } \right\} < \infty.
\end{equation}
\end{pro}
\begin{proof} It follows from Lemma \ref{Galdi} that
\begin{equation} \begin{aligned} &\ \ \ \ \ \|u\|_{W^{2,4}} \\
& \leq C\|u\|_{H^1} + C \|\rho u_t\|_{L^4} + C\|(\rho u\cdot \nabla) u\|_{L^4} + C\|(B\cdot \nabla) B\|_{L^4}\\
& \leq C\|u\|_{H^1} + C \|\nabla u_t\|_{L^2} + C\|u\|_{L^\infty} \|\nabla u\|_{L^4} + C\|B\|_{L^\infty}\|\nabla B\|_{L^4}\\
& \leq C\|u\|_{H^1} + C\|\nabla u_t\|_{L^2} +C \|u\|_{L^\infty} \|\nabla u\|_{L^2}^{1/2} \|u\|_{H^2}^{1/2} + C\|B\|_{L^\infty}\|\nabla B\|_{L^2}^{1/2} \|B\|_{H^2}^{1/2},
\end{aligned}\end{equation}
which finishes the proof of \eqref{second-level-corollary}.
\end{proof}
Furthermore, we have the following proposition.
\begin{pro} Under the assumptions of Theorem \ref{main-result}, it holds that
\label{second-level-corollary-2} \begin{equation} \sup_{0< T< T^*} \left\{ \|u\|_{H^2}+ \|B\|_{H^2} \right\} < \infty. \end{equation}
\end{pro}
\begin{proof} If the inequality \eqref{first-level-15} is reconsidered, then the proof is done.
\end{proof}
{\bf Step V\ \ Estimates for $\|\nabla \rho \|_{L^\infty(0, T; H^1)}$ and $ \|(u, B)\|_{L^2(0, T; H^3)}$. }
\begin{pro}Under the assumptions of Theorem \ref{main-result}, it holds that
\label{Third-Level}
\begin{equation}\label{third-level}
\sup_{0<T<T^*}\left \{ \|\rho\|_{L^\infty(0, T; H^2)} + \int_0^T \left (\|u\|_{H^3}^2 + \|B\|_{H^3}^2 \right) dt \right\}< \infty.
\end{equation}
\end{pro}
\begin{proof}
Taking the $x_j$ ($j=1, 2$)-derivative of $\eqref{MHD-new}_1$,
\begin{equation}\label{third-level-1}
(\rho_{x_j})_t + u\cdot \nabla \rho_{x_j} = - u_{x_j} \cdot \nabla \rho.
\end{equation}
Multiplying the new equation by $\rho_{x_j}$, integrating over $\Omega$, and summing up, then we obtain
\begin{equation}\label{third-level-2}
\frac{d}{dt} \int |\nabla \rho|^2 dx \leq C\int |\nabla u||\nabla \rho |^2 dx \leq C\|\nabla u\|_{L^\infty } \|\nabla \rho\|_{L^2}^2.
\end{equation}
Similarly, we have the following higher order estimate for $\rho$,
\begin{equation}\label{third-level-3} \begin{aligned}
\frac{d}{dt} \int |\nabla^2 \rho|^2 dx & \leq C \int \left( |\nabla u||\nabla^2 \rho|^2 + |\nabla^2 u||\nabla \rho| |\nabla^2 \rho| \right)dx \\
& \leq C \|\nabla u\|_{L^\infty} \|\nabla^2 \rho\|_{L^2}^2 + \|\nabla^2 u \|_{L^4} \|\nabla \rho\|_{L^4} \|\nabla^2 \rho\|_{L^2}.
\end{aligned} \end{equation}
Making use of Sobolev embedding inequality and Gronwall's inequality, we get that
\begin{equation}\label{third-level-4}
\|\nabla \rho(T) \|_{H^1}^2 \leq C \|\nabla \rho_0\|_{H^1}^2 \exp \left( \int_0^T C \|\nabla u(t)\|_{W^{1,4}} dt \right) < \infty.
\end{equation}
It follows from Lemma \ref{Galdi} that
\begin{equation}\label{third-level-5}\begin{aligned}
\|u\|_{H^3} & \leq C\left( \|u\|_{H^1} + \|\rho u_t\|_{H^1} + \|\rho u\cdot \nabla u\|_{H^1} + \|B \cdot \nabla B\|_{H^1} \right) \\
& \leq C \left( \|u\|_{H^1} + \|\nabla \rho\|_{L^2} \|u_t\|_{L^2}+ \| u_t\|_{H^1}+\|\nabla \rho\|_{L^2} \|u\|_{L^\infty} \|\nabla u\|_{L^2} \right) \\
&\ \ \ \ \ \ + C\left( \|\nabla u\|_{L^2}^2 + \|u\|_{L^\infty} \|\nabla u\|_{H^1} + \|B\|_{H^1}^2 +\|B\|_{L^\infty} \|\nabla B\|_{H^1} \right),
\end{aligned}
\end{equation}
which implies that $\sup_{0< T< T^*} \|u\|_{L^2(0, T; H^3)} < \infty.$ Similar proof leads to the same conclusion for $B$. This completes the proof of Proposition \ref{Third-Level}.
\end{proof}
Combining all the estimates in Propositions \ref{First-Level}, \ref{Second-Level} and \ref{Third-Level}, we prove that \eqref{final} holds and complete the whole proof of Theorem \ref{main-result}.
|
{
"timestamp": "2012-06-28T02:01:11",
"yymm": "1206",
"arxiv_id": "1206.6144",
"language": "en",
"url": "https://arxiv.org/abs/1206.6144"
}
|
\section{A Preliminary Introduction}
\IEEEPARstart{T}{he} theory of Localized Waves (LW), also called Non-Diffracting Waves, has been developed, experimentally verified
and generalized over the years. The application sectors may include diverse areas such as Optics, Acoustics and Geophysics. One
of their striking properties is that their peak-velocity can assume any value between zero and infinity\cite{hugo2008}.
The most important characteristic of the LWs, however, is that they are solutions to the wave equations capable of resisting the
effects of diffraction, at least up to a certain, long field-depth $L$. They exist both as localized beams and as localized
pulses. \ The LWs that have been more intensely investigated are the so-called ``superluminal" ones\cite{recami2009}, they being the easiest
to be mathematically constructed in closed form, and therefore experimentally generated. Let us recall that the first LWs
of this kind have been mathematically and experimentally created by J.-y. Lu et al. in Acoustics\cite{lu1992,lu1992b}
(when they are actually supersonic, and not superluminal), and used by J.-y. Lu et al. for the construction of a high-definition
ultrasound scanner, directly yielding a 3D image\cite{lu1994}.
More recently, also for the {\em subluminal} LWs various analytic exact solutions started to be found\cite{zamboni2008}.
Among the subluminal localized waves, the most interesting appear to be those corresponding to zero peak-velocity, that is,
those with a {\em static} envelope (within which only the carrier wave propagates): Such LWs ``at rest" have been called
{\em Frozen Waves} (FW) by us\cite{zamboni2004,zamboni2005,zamboni2006}. The FWs have been actually produced for the first
time, quite recently, in the sector of Optics\cite{tarcio2011}.
The above information confirms that LW pulses and beams can be used also in ultrasound imaging; but this is not as evident
for the FWs because of the {\em static} nature of their envelopes. What FWs make possible, by contrast, is the localization
of arbitrary spots of energy within a selected space interval $0 \leq z \leq L$.
Let us recall here some previous work done in connection with ultrasonic non-diffracting fields. This list,
obviously, is not at all exhaustive, but is merely illustrative of previous literature related with
pulses\cite{lu1992,lu1992b,lu1990,castellanos2010}, with single Bessel beams\cite{hsu1989,holm1998,nowack2012}, with
characteristics of the field emitted by flat annular arrays\cite{fox2002a,fox2002b}, and with scattering produced by
spherical objects\cite{mitri2009,mitri2010,mitri2011}. Many references therein could also be considered. \ Afterwards,
a lot of work has been produced
by investigating all the possible superpositions of Bessel beams obtained via integrations over their axicon angles
(that is, their speed) and/or frequencies and/or wavenumbers and/or phases, etc.: See, e.g., \cite{hugo2008,recami2009}
and the references quoted therein.
However, only few publications\cite{lu1997,zamboni2011} have addressed the superposition of Bessel
beams with the same frequency but with different longitudinal wavenumbers: In particular for obtaining Frozen
Waves\cite{zamboni2004,zamboni2005,zamboni2006}.
Purpose of this paper is to contribute to the last topic, by the application of a general procedure previously developed
by us for the generation of FWs (mainly in Optics), and then simulate the production of ultrasonic FW fields in water.
Namely, we shall use the methodology in Refs.\cite{zamboni2004,zamboni2005,zamboni2006},
that allows controlling the longitudinal intensity shape of the resulting fields, confined, as we were saying, inside a
pre-selected space interval
$0 \leq z \leq L$; where $z$ is the propagation axis, while $L$ can be much larger than the wavelength $\lambda$ of the
adopted ultrasonic monochromatic excitation. In practice, we shall perform appropriate superpositions of zero-order $(m=0)$
Bessel beams. The generated fields, apart from possessing a high transverse
localization, are endowed with the important characteristic that inside the chosen interval they can assume any desired shape:
For instance, one or more high-intensity peaks, with distances between them much larger than $\lambda$.\\
Before going on, let us add a couple of preliminary {\em comments} about the FWs. The first is that it would be of course possible
to use higher-order $(m\geqslant1)$ Bessel beams. In this case, however, it would be practically necessary a subdivision
of the radiator ring elements into small arc segments (i.e., a {\em segmented array\/}\cite{akhnak2002}), because of the
azimuthal phase dependence $e^{im\phi}$ of
those fields on the beam axis $z$. It appears to be more convenient tackling with such complexities in the aperture design only
after that an investigation of the cylindrically symmetric zero-order Bessel beam superpositions has been exploited. \
Anyway, we shall briefly come back, in Sec.2, to the higher-order Bessel beams question. \
The second comment is related to the flux of energy within a FW, especially in the realistic case of a finite aperture.
Let us go back for a moment to a (truncated) Bessel beam, by recalling first of all that its good properties are due
to the fact that, even in presence of diffraction, the ``intensity rings" that are known to constitute its transverse
structure (and whose values decrease when the spatial transverse coordinate $\rho$ increases) go on reconstructing the beam
itself all along a (large) depth of field. Namely, given a Bessel beam and a Gaussian beam ---both with the same energy $E$,
the same spot $\Delta\rho_0$ and produced by apertures with the same radius $R$ in the plane $z=0$---, the {\em percentage}
of energy $E$ contained in the central peak region $0 \leq \rho \leq \Delta\rho_0$ is {\em smaller} for a Bessel rather than
for a Gaussian beam: It is just such a different distribution of energy on the transverse plane that causes the
{\em reconstruction} of the Bessel beam central peak even at large distances from the source, and even after an obstacle
with sizes smaller than the {\em aperture\/}'s. Such a property is possessed also by the other localized waves. \ In
other words, a certain energy {\em must} be contained in, and carried by, the side-lobes! Of course, such energy can be
reduced towards its minimum necessary value by suitable techniques: See, e.g., Refs.\cite{lu11,lu12}.
Also the FWs we are dealing with in this paper are non-diffracting beams: and we investigate their generation by a limited
aperture, so that their energy flux does remain finite. But they are just the energy contributions coming from the lateral
rings that strengthen the FW field pattern, in the $z$ direction, all along the FW field depth.
The paper is organized as follows: After this Introduction, Section 2 briefly describes the
methodology used for the generation of FWs. \ Then, in Section 3 the method for the computation of the ultrasonic fields
is presented; followed in Section 4 by a discussion about the annular radiator requirements. \
With respect to the last point, let us observe that, even if the precision requirements that one meets for the annular
transducer sizes are tight, and appear as a real challenge to be overcome, nevertheless it is encouraging to know that
various successes have been already obtained in similar situations, for example for monolithic\cite{lu1990},
piezocomposite\cite{akhnak2002}, and PVDF transducers\cite{Piezoflex}. More explicitly, let us mention that
ultrasonic beams have been technologically synthesized, by superposing Bessel radiations, in interesting works
like \cite{domell1982,foster1989,hsu1989,lu1994b,eiras2003,aulet2006,moreno2010,calas2010,castellanos2011}.
\ Finally, four simulated examples of ultrasonic FW fields in an ideal water-like
medium assuming no attenuation are presented in Section 5; while some conclusions appear at the end of the paper.
\section{Introduction to the Frozen Waves (FW)}
\noindent In this Section, a method for the generation of FWs is presented. For more details the reader is referred to
reference\cite{zamboni2005}. The main aim here is constructing, inside the finite interval $0\leq z \leq L$
of the propagation axis ($\rho=0$), a {\em stationary} intensity envelope with a desired shape, that we call $|F(z)|^2$.
To such a purpose, we shall take advantage of the field localization features of the axis-symmetrical zero-order
Bessel beams. \ Of course, we could start from higher order Bessel beams, and their relevant formalism. But, for simplicity,
as already said, we prefer to start from the FWs given by the following finite superposition of zero-order Bessel functions
of the $1^{st}$ kind, all with the same frequency but with different (and still unknown) longitudinal wavenumbers $\beta_n \,$:
\begin{equation}
\label{eqn_1}
\Psi(\rho,z,t) = e^{-i\omega_0t} \sum\limits_{n=-N}^{N} A_n
J_0(k_{\rho n}\rho) e^{i\beta_n z} \ .
\end{equation}
\noindent In Eq.(\ref{eqn_1}), quantities $k_{\rho n}$ are the transverse wave numbers of the Bessel beams, linked to the
values of $\beta_n$ by the following relationship:
\begin{equation}
\label{eqn_2}
k_{\rho n}^2 + \beta_{n}^2 = \frac{\omega_0^2}{c^2} \ ,
\end{equation}
\noindent where $\omega_0=2\pi f_0$, is the angular frequency, and $c$ the plane wave phase velocity in the selected medium.
By the way, let us recall that we leave the frequency fixed, since we are considering beams (and not pulses): We vary,
afterward, the longitudinal wavenumber (and amplitude and phase) of each Bessel beam, so as to obtain the desired longitudinal
shape of the resulting beam.
It is important to notice that we restrict the values of the longitudinal wave numbers to the interval
\begin{equation}
\label{eqn_3}
0 \leq \beta_n \leq \frac{\omega_0}{c} \; ,
\end{equation}
\noindent in order to avoid evanescent waves and to ensure forward propagation only.
Our goal is now finding out the values of the $\beta_n$, and of the constant coefficients $A_n$ in Eq.(\ref{eqn_1}),
in order to reproduce approximately, inside the said interval $0\leq z \leq L$, the desired longitudinal intensity
pattern $|F(z)|^2$. \ In other words, for $\rho=0$ we need to have:
\begin{equation}
\label{eqn_4}
\sum\limits_{n=-N}^{N} A_n e^{i\beta_n z} \approx F(z) \ \ \ \ \text{with} \ 0\leq z \leq L \; .
\end{equation}
\noindent To obtain this, one possibility is to take $\beta_n=\frac{2\pi n}{L}$, thus obtaining a truncated Fourier series
that represents the desired pattern $F(z)$. This choice, however, is not very appropriate because of two
reasons: \ (i) it yields negative values for $\beta_n$ when $n<0$; which would imply backward propagations; \ (ii)
we wish to have $L\gg \lambda$, and in this case the main terms in the series could lead to very small values of the $\beta_n$,
resulting in a very short depth of field, and strongly affecting, therefore, the generation of the desired envelopes
far from the source. \ A possible way out is to take
\begin{equation}
\label{eqn_5}
\beta_{n} = Q + \frac{2\pi}{L}n \ ,
\end{equation}
\noindent where the value of $Q>0$ can be freely selected. Then, on introducing Eq.(\ref{eqn_5}) into Eq.(\ref{eqn_2}),
the transverse wavenumbers can be expressed as
\begin{equation}
\label{eqn_6}
k_{\rho n}^2 = \frac{\omega_0^2}{c^2} - \left(Q + \frac{2\pi}{L}n\right)^2 \; .
\end{equation}
\noindent Now, inserting again Eq.(\ref{eqn_5}) for $\beta_n$ into (\ref{eqn_1}) and putting $\rho=0$, one gets
\begin{equation}
\label{eqn_7}
\Psi(\rho=0,z,t) = e^{-i\omega_0t} e^{iQz}\sum\limits_{n=-N}^{N} A_n\;e^{i\frac{2\pi n}{L}z} \ ,
\end{equation}
\noindent with
\begin{equation}
\label{eqn_8}
A_n = \frac{1}{L} \int_{0}^{L} F(z)\;e^{-i\frac{2\pi n}{L}z} dz \ .
\end{equation}
\noindent Expressions (\ref{eqn_7}) and (\ref{eqn_8}) represent an approximation of the desired longitudinal pattern, since the
superposition in (\ref{eqn_1}) is necessarily truncated. However, we can control, and improve, the fidelity of the reconstruction
by varying the total number of terms $2N+1$ in the finite superposition by a suitable choice of the parameters $Q$ and/or
$L$. \ The complex coefficients $A_n$ in (\ref{eqn_8}) forward the final amplitudes and phases for each one of the Bessel
beams in Eq.(\ref{eqn_1}); and, because we are adding together zero-order Bessel functions, we can expect a high
degree of field concentration around $\rho=0$.
The methodology introduced here deals with control over the longitudinal intensity pattern. Obviously, we cannot get a total
3D control, owing to the fact that the field must obey the wave equation. However, we can get some control over the
transverse spot size through the parameter $Q$. \ Actually, Eq.(\ref{eqn_1}), which defines our FW, is a superposition of
zero-order Bessel beams, and therefore we expect that the resulting field possesses an important transverse localization
around $\rho=0$. Each Bessel beam in superposition (\ref{eqn_1}) is associated with a central spot with transverse width
$\Delta\rho_n \approx 2.4/k_{\rho n}$; so that, on the basis of the expected convergence of series (\ref{eqn_1}), we can
estimate the radius of the transverse spot of the resulting beam as being:
\begin{equation}
\label{eqn_9}
\Delta \rho \approx \frac{2.4}{k_{\rho,n=0}} = \frac{2.4}{\sqrt{\omega_0^2/c^2 - Q^2}} \ .
\end{equation}
Let us explicitly notice, at this point, that one can increase the control over the transverse intensity pattern
of the resulting beam by having recourse to the already mentioned higher order Bessel beams, \ $A_n \exp(-i\omega t)
J_{\nu}(k_{\rho\,n}\rho)\exp(i \beta_n z)$, \ with $\nu \geq 1$, \ in the superposition (\ref{eqn_1}),
keeping the same values of the $A_n$ given by Eq.(\ref{eqn_8}), and of the $\beta_n$ given by Eq.(\ref{eqn_5}). \
On doing this, the longitudinal intensity pattern can be shifted from $\rho = 0$ to a cylindrical surface
of radius $\rho = \rho'$, which can be approximately evaluated through the relation
$$(\frac{d}{d\rho}J_{\nu}(\rho \sqrt{\omega^2/c^2 - Q^2}))|_{\rho=\rho'} = 0 \; .$$
\noindent This allows one to get interesting nondiffracting fields, modelled over cylindrical surfaces. Some details can
be found in Ref.\cite{zamboni2005}.
Anyway, relationship (\ref{eqn_9}) itself is rather useful, because, once we have chosen the desired
longitudinal intensity pattern $|F(z)|^2$, we can also choose the size of the transverse spot ($2\Delta\rho$),
and then use Eq.(\ref{eqn_9}) to evaluate the corresponding, needed value of $Q$.
Such a method for creating FWs depends on the nature of the waves to be used. For instance, for an efficient generation in
Optics, a recourse to ordinary lenses is required even when using the classical method with an array of so-called Durnin et al.'s
circular slits\cite{sheppard1,sheppard2,durnin1987}.
In several other cases, optical axicon lenses have been used\cite{McLeod1,McLeod2}
even if, to say the truth, axicon devices had been already proposed since long time\cite{UltrasAxic}
in Acoustics too. Of course, whenever possible, one may have recourse also to holographic elements, or suitable mirrors, etc. \
For instance, optical FWs were produced experimentally (for the first time) by using computer generated holograms and a
spatial light modulator\cite{tarcio2011}.
In the case of ultrasound, we may adopt annular transducers composed of many narrow rings. Each one of them will
have to be excited by a sinusoidal input having a particular amplitude and phase. \ Section 4 will discuss these points
in more detail.
\section{Method for Calculating Ultrasonic Fields}
\noindent This Section summarizes the well known impulse response (IR) method, which is the mathematical
basis used in this work for the computation of the ultrasonic frozen waves. \ Such a spatial impulse response method
has recourse to the linear system theory to separate the temporal from the spatial characteristics of the acoustic field. \
Then, the aperture IR function can be derived from the Rayleigh-Sommerfeld formulation of diffraction,
that is, from the Rayleigh integral\cite{stepanishen1971,harris1981,jensen1992,goodman2005}, via the
following expression:
\begin{equation}
\label{eqn_10}
h(\mathbf{r_1},t) = \int_{\mathcal{S}}
\frac{\delta\left(t-\frac{|\mathbf{r_1-r_0}|}{c}\right)}{2\pi |\mathbf{r_1-r_0}|} d\mathcal{S}\ .
\end{equation}
\noindent Here the radiating aperture $S$ is mounted on an infinite rigid baffle; quantity $c$ being now the speed of sound,
and $t$ the time. The spatial position of the field point is designated by $\mathbf{r_1}$, while $\mathbf{r_0}$ denotes
the location of the radiating aperture.
The integral (\ref{eqn_10}) is basically the statement of Huygens' principle, and evaluates the acoustic
field, i.e., the field pressure relative to the unperturbed static pressure $P_0$, by adding the spherical wave
contributions from all the small area elements that constitute the aperture. \ This process can also be reformulated
by using the acoustic reciprocity principle, and then constructing $h$ by finding out the part of the spherical waves
that intersect the aperture. \ This implies that the IR function $h$ depends both on the form of the radiating element and
on its relative position w.r.t. (with respect to) the point where the acoustic pressure is calculated. \ The IR formulation assumes
a flat or gently curved aperture (that is, the aperture dimensions are large compared to the wavelength), which
radiates into a homogeneous medium with no attenuation,\footnote{It is possible to include the attenuation of the medium by
filtering the Fourier transform of $h$ by a frequency dependent attenuation function\cite{harris1981,jensen1993}.} and operates in
a linear regime.
On using the spatial impulse response $h(x,y,z,t)$, the final acoustic pressure can be written as\cite{harris1981,jensen1993}:
\begin{equation} \label{eqn_11}
p(x,y,z,t) = \rho_{m} \frac{\partial v(t)}{\partial t} * h(x,y,z,t) \ ,
\end{equation}
\noindent where $\rho_{m}$ is the density of the medium, and the symbol star, $*$, denotes the convolution in time of the
derivative of the velocity surface signal $v$, for a radiating element with aperture's IR function $h$.
The above procedure was implemented into a MATLAB program, employing the DREAM toolbox\cite{piwakowsky1989,piwakowsky1999}
for the calculation of the IR of the rings that compose the annular aperture. The steps followed by the program for computing
the acoustic fields can be summarized as follows:
\begin{enumerate}
\item Compute the theoretical FW desired pattern.
\item Determine the transducer requirements.
\item Sample the amplitude \& phase FW profiles at $z=0$.
\item Assign sinusoidal $v_k$ for ring $n_k$ using sampled values.
\item Sweep field points $x_{i,j};y_{i,j};z_{i,j}$ for transducer ring $n_k$.
\item At point $P_{(i,j)}$ calculate $h$ and $p_{(i,j)} = \rho_{m} \dot{v}_k*h$.
\item Accumulate pressure for ring $n_k$ \& go back to step $4$.
\item Store and display results.
\end{enumerate}
\noindent The medium selected for the simulations was an ideal water-like lossless medium,\footnote{The case
including attenuation will be investigated in a forthcoming paper.} having a constant propagation
speed of $c=1540 \;$m/s [which is an average\cite{rossing2007} among $c_{{\rm fat}}=1450\;$m/s, \ $c_{{\rm blood}}=1575\;$m/s,
and $c_{{\rm muscle}}=1600\;$m/s].
\section{Aperture Dimensioning}
\noindent In this Section we discuss some dimensioning aspects of an annular aperture for creating the FW fields.
They include: \ i) the selection of the transducer size; and \ ii) the number and dimensions of the rings.
\subsection{Transducer size}
\noindent The first element to be considered is the minimum aperture radius, $(R_ {{\rm min}})$, required for the generation of the
desired ultrasonic FW fields. \ The selection of this value, however, is not entirely free, because it depends on the chosen
$N$, that determines the number ($2N+1$) of Bessel beams added in Eq.(\ref{eqn_1}). \ From Eqs.(\ref{eqn_3})
and (\ref{eqn_5}) we can write:
\begin{align}\label{eqn_12}
\beta_ {{\rm max}} &= Q + \frac{2\pi N}{L} \leq \frac{\omega_0}{c} \\
\beta_ {{\rm min}} &= Q - \frac{2\pi N}{L} \geq 0 \notag \; .
\end{align}
\noindent Afterwards, if we put $\beta_ {{\rm max}}=\frac{\omega_0}{c}$ into Eqs.(\ref{eqn_12}), and subtract both sides of the
equations, we get
\begin{equation}\label{eqn_13}
\beta_ {{\rm min}}=\frac{\omega_0}{c}-\frac{4\pi N}{L} \ .
\end{equation}
\noindent The equation for the maximum axicon-angle is thus
\begin{equation}\label{eqn_14}
\tan \theta_ {{\rm max}} = \frac{R_ {{\rm min}}}{L} = \frac{k_{\rho_ {{\rm max}}}}{\beta_ {{\rm min}}}\ .
\end{equation}
Inserting the expression for $k_{\rho}$ from Eq.(\ref{eqn_2}) into Eq.(\ref{eqn_14})
and solving it for $\beta_ {{\rm min}}$, we get:
\begin{equation}\label{eqn_15}
\beta_ {{\rm min}} = \frac{\frac{\omega_0}{c}}{\sqrt{1+\frac{R_ {{\rm min}}^2}{L^2}}}\ .
\end{equation}
Finally, if we equate expressions (\ref{eqn_13}) and (\ref{eqn_15}), we obtain for $R_ {{\rm min}}$ the expression
\begin{equation}\label{eqn_16}
R_ {{\rm min}} = L \sqrt{\frac{\frac{\omega_0^2}{c^2}}{\left(\frac{\omega_0}{c}-\frac{4\pi N}{L}\right)^2} -1} \ ,
\end{equation}
\begin{figure}[t]
\centering
\includegraphics[width=3.5in]{fig1}
\caption{Relationship between the aperture radius $R$ and the summation limits $N$ entering Eq.(\ref{eqn_4}), in terms of the
parameters $L$ and $\omega_0=2\pi f_0$. Continuous lines refer to frequency $f_0=2.5\;$MHz, while dashed lines refer to
$f_0=4\;$MHz.}
\label{fig_1}
\end{figure}
\noindent which does allow the estimation of the minimum aperture-radius for the selected value of $N$, with $\omega_0$ and $L$
as parameters.
If requisite (\ref{eqn_16}) is not fulfilled, and $R<R_ {{\rm min}}$, the resulting FW pattern may result distorted. This is because
the higher is the precision desired for the FW, or the larger its desired maximum distance $L$, the higher will be the values
needed for $N^\uparrow$ and the aperture radius $R^\uparrow$. This is a logical conclusion, if we think the frozen waves
as nothing but the realization of Huygens' principle for constructive/destructive interference. \ We can also get the above
mentioned dependence by putting $\beta_ {{\rm min}}=0$ in Eq.(\ref{eqn_13}), and then solving it for $N$, so as to obtain
the \emph{maximum} number of allowable terms $N_ {{\rm max}}$ (this implies setting $Q=\frac{\omega_0}{2c}$) in the Fourier
superposition:
\begin{equation}\label{eqn_17}
N \leq N_ {{\rm max}} = \frac{L\omega_0}{4\pi c} \ .
\end{equation}
\begin{figure}[t]
\centering
\includegraphics[width=3.5in]{fig2}
\caption{Relationship between the aperture radius $R$ and the number $N$ which determines the number of the
Fourier terms entering Eq.(\ref{eqn_4}), for a fixed distance $L=0.040\;$m but for various values of $f_0$.} \label{fig_2}
\end{figure}
However, care has to be taken when using this limiting value, since usually it leads to impractical transducer
sizes ($R > 50\;$mm), with too many rings ($N_r>100$).
Relationship (\ref{eqn_16}) is at work in Figures \ref{fig_1} and \ref{fig_2}, where one of the parameters ($L$ or $w_0$)
is alternatively fixed, while the other is left variable.\footnote{We'd like to point out in these Figures that
the estimated size of the spot-radius given by Eq.(\ref{eqn_9}) changes when $N$ is varied. This is because $Q$ disappears
during the derivation of Eq.(\ref{eqn_16}). However, for the current setting, the changes are not significant
and are in the range \ $0.3 \leq \Delta\rho \leq 0.5\;$mm.} Both Figures clearly show how the value of
$R$ rapidly increases when $N$ is increased. \ By contrast, Figure \ref{fig_1} depicts Eq.(\ref{eqn_16}) for different values
of $L$, using the two frequencies $f_0=2.5\;$MHz, \ and \ $f_0=4\;$MHz; \ and shows how in both cases, up to a certain value
of $N$ [e.g. $N\thickapprox 15$ for the first case, continuous lines], the value of $R$ almost does not change when the
distance $L$ is increased.
The rapid increase of $R$ with $N$ can be partially mitigated by raising the working frequency
$f_0^\uparrow$, as shown in Fig.\ref{fig_2}, where a fixed distance $L=0.040\;$m is assumed. \
This option, however, has the effect of requiring smaller ring-widths ($d^\downarrow$) for a good generation of the
fields.
In Fig.\ref{fig_3} we can see how the frequency affects the {\em simulated} profiles, in the simple case of an ideal
FW consisting of two step-functions only [a situation better exploited in Case 1 of Sec.5: Cf. Eq.(17) below]. The
three intensity profiles simulated in Fig.\ref{fig_3} have been obtained by varying the working frequency, always keeping the
same aperture. \ Notice how, for frequencies larger than $f\approx2.5\;$MHz, any increase in the frequency distorts
more and more the envelope of the desired FW. \
This effect is due to two causes: First, the dimensions of the rings are not changed, and, second, the patterns that result
from the beam superpositions in Eq.(\ref{eqn_1}) are influenced by the approximately linear relationship existing between the
ultrasound wavelength and the average distance among the sidelobe peaks of the superposed Bessel functions. \
At the end, the combination of these two causes produces a destructive interference effect on the generated FW.
The effect of changing the emitter radius $R$ can be observed in Figure \ref{fig_4}, which still refers to the
FW considered in the previous graphic: That is, to an ideal FW consisting of two step-functions
only [a case better exploited, let us repeat, in Figure \ref{fig_8} of Case 1 in Sec.5: Cf. Eq.(17) below]. \
This time, the three simulated intensity profiles have been obtained using the different transducer radii\ $R_1=40\;$mm, \
$R_2=35\;$mm \ and \ $R_3=30\;$mm. \ Other parameters used for the simulation are: \ $L=30\;$mm, \ $d=0.3\;$mm, \
$\Delta_d=0.05\;$mm, \ $N=9$ \ and \ $f_0=2.5\;$MHz. \
Notice how, when the radius is reduced (e.g., $R=30\;$mm), the FW intensity profile gets more distorted with respect
to the ideal envelope.
\begin{figure}[t]
\centering
\includegraphics[width=3.25in]{fig3}
\caption{Side view of the three {\em simulated} profiles, for a simple (ideal) FW consisting of two step-functions only
[see case Case 1 of Sec.5: \ Cf. Eq.(17) below], obtained by varying the working frequency. The parameter settings
are the following: \ $N=12$; \ $R=35\;$mm; \ $d=0.3\;$mm; \ $\Delta_d=0.05\;$mm; \ $N_r=101$; \ $L=30\;$mm, \ and \ $f_0=2.5\;$MHz}
\label{fig_3}
\end{figure}
\begin{figure}[t]
\vspace{-0.5mm}
\centering
\includegraphics[width=3.25in]{fig4}
\caption{Comparison of the simulated profiles, of the FW considered in Fig.3, obtained now by using three different values
for the transducer radius $R$. \ Settings: $N=9$; \ $d=0.3\;$mm; \ $\Delta_d=0.05\;$mm; \ $L=30\;$mm; \ $f_0=2.5\;$MHz; \
$N_{r1}=115$; \ $N_{r2}=101$, \ and \ $N_{r3}=86$.}
\label{fig_4}
\end{figure}
\begin{figure}[t]
\vspace{-0.5mm}
\centering
\includegraphics[width=3.25in]{fig5}
\caption{Comparison of the simulated FW profiles when using different values for $d; \ \Delta_d$. \ Settings: \
$R=35$\;mm; \ $N=9$; \ $L=30$\;mm; \ $f_0=2.5$\;MHz; \ $N_{r1}=101$; \ $N_{r2}=88$; \ $N_{r3}=88$\ and $N_{r4}=117$.}
\label{fig_5}
\end{figure}
\subsection{Dimensions of the Rings}
\noindent The next step in the definition of the annular radiator is the dimensioning of the transducer rings. This means
determining: \ (i) the width \ $d$, \ and \ (ii) the inter-ring spacing, or kerf, $\Delta_d$.\\
On this respect, one meets a practical problem when the required distances are very small, for instance
sub-millimetric dimensions. In this case, the technology employed in the annular transducer fabrication
may strongly influence its performance. \
To illustrate this effect on the generated ultrasonic fields, in Figure \ref{fig_5} we show how small changes
in $d$ or $\Delta_d$ can lead to severe distortions in the profiles (and the same will be true, e.g., for Fig.8 below).
The corresponding parameters ($d_i; \ \Delta_{d_i}$) chosen for this Figure are: First choice: ($0.30; \ 0.05$) mm; \ Second
choice: ($0.35; \ 0.05$) mm; \ Third choice: ($0.30; \ 0.10$) mm; \ and Fourth choice: ($0.25; \ 0.05$) mm.
\hyphenation{cri-ti-cal}
One can observe how, for dimensions larger than $d\approx0.30$\;mm and $\Delta_d\approx 0.05$\;mm, the FW envelopes get
heavily modified. This phenomenon is probably related to the above mentioned causes of destructive interference, as well as
to a near-field effect of the radiator. One can attempt to partially overcome it by increasing the maximum distance
$L^\uparrow$ of the FW fields.
\begin{figure}[t]
\centering
\includegraphics[width=3.5in]{fig6}
\caption{Sampling of the ideal symmetric {\em amplitude} pattern, for the generation of the field
in Fig.\ref{fig_8} below. The dots indicate the sampled values used for the excitation of the rings.}
\label{fig_6}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=3.5in]{fig7}
\caption{Sampling of the ideal symmetric {\em phase} pattern, for the generation of the field in the following Fig.\ref{fig_8}.
The dots indicate the sampled values used for the excitation of the transducer rings.}
\label{fig_7}
\end{figure}
What has been discussed above leads to the following question: What are the dimensions of $d$ and $\Delta_{d}$ that will permit us
to generate many different ultrasonic FW patterns with the same transducer? \ Stated in another way: What will be the
ring dimensions ($d; \ \Delta_{d}$) that best comply with a predefined set of FWs to be generated? \
To address these questions, one has first to consider the sampling process involved during the generation of the frozen waves.
This is a critical point in order to attain a proper excitation while minimizing the hardware cost.
Indeed, each transducer ring will require in practice one electronic emission channel in order to
operate.\footnote{It should be remembered that we are trying to generate localized energy spots by means of the FW, and not
dealing with ultrasound imaging.} \ For instance, the number of rings ($N_r$) and emission channels needed for the physical
realization of the patterns shown in Figures \ref{fig_8}--\ref{fig_15} is in practice of $N_r=101$; which means a considerable
amount of electronic hardware for any application. \ In conclusion, besides the required electronics,
the definition of the annular transducer itself is of key importance, and perhaps the major issue to be considered at
first in any particular application\cite{recami2011}.
As mentioned earlier, the problem of the rings definition implies in its turn the sampling procedure. An example of
this process is shown in Figs. \ref{fig_6} and \ref{fig_7}; both corresponding to the FW envelope of Fig. \ref{fig_8}
described in Case 1 of Sec.5. \
Here the theoretical FW profiles of amplitude and phase at the aperture (i.e., at $z=0$) are sampled by adopting a
constant-step approach. The sampled values are then used as the final amplitudes and phases of the sinusoidal excitations
for each one of the transducer rings. \ The sampling process has other alternatives to be adopted, as for example a
non-constant step spacing. However, we prefer to adopt in this paper the rather simple approach of constant sampling,
leaving other possibilities for future work. In other words, once the values for $d$ and $\Delta_d$ have been assigned, we use
them for a radial {\em sampling} of the theoretical FW patterns at $z=0$: That is to say, each sample value corresponds
here to the {\em center} of each ring (i.e., the center of the segment associated to the ring width $d$).
\begin{figure}[t]
\vspace{-0.5mm}\centering
\includegraphics[width=3.5in]{fig8}
\caption{The theoretical FW pattern chosen in the Case 1 of Sec.5. \ Settings: $N=12$; \ $R=35\;$mm; \ $d=0.3\;$mm; \
$\Delta_d=0.05\;$mm; \ $N_r=101$; \ $L=30\;$mm; \ and \ $f_0=2.5\;$MHz.}
\label{fig_8}
\end{figure}
\begin{figure}[t]
\vspace{-0.5mm}\centering
\includegraphics[width=3.5in]{fig9}
\caption{The FW pattern, corresponding to the previous Figure, that is, to Case 1 of Sec.5, obtained by our simulated experiment.
The parameters are set as in Fig.3: Namely, \ $N=12$; \ $R=35\;$mm; \ $d=0.3\;$mm; \ $\Delta_d=0.05\;$mm; \ $N_r=101$; \
$L=30\;$mm \ and \ $f_0=2.5\;$MHz.}
\label{fig_9}
\end{figure}
\section{Simulation of FWs: \ Results}
\noindent To further demonstrate the possibilities offered by the ultrasound frozen waves, in this Section we present
four different simulations of ultrasonic FWs in a water-like medium, with high transverse localization [at this stage, we prefer to test simple
arbitrary patterns]. \ As mentioned at the beginning, we assume in this paper a homogeneous medium, with no
attenuation effects, operating in a linear regime; the sound speed being $c=1540\;$m/s. \ In all cases, the same
annular aperture is used, with $N_r=101$ rings endowed with width \ $d=0.3\;$mm \ and \ kerf of $\Delta_d=0.05\;$mm. \
The operating frequency is fixed at $f_0=2.5\;$MHz, which is an adequate value for use in Medicine, corresponding to a
wavelength of about $\lambda\backsimeq0.62\;$mm. \ The only parameters varied during our simulations, apart from the FW pattern
itself, are the maximum allowable distance or field-depth $L$, and the value of $N$ determining the number, $2N+1$, of Bessel
beams in Eq.(\ref{eqn_1}).
The double Figures associated with this Section 5 (namely, Figs.8-9, 10-11, 12-13, 14-15) depict, first, the theoretical
pattern to be constructed (that is, a $3$D plot of the chosen FW); and, second, the result of the corresponding impulse
response simulation; respectively. The sampling frequency used in the IR method is $f_s=100\;$MHz.
Let us comment about the approximate generation ---by our simulated experiment--- of the chosen FWs. Besides playing with
the value of $N$, which enters relation (\ref{eqn_16}), we used the tool of pushing the value of $N$ a little bit up,
for a better reconstruction of the desired ideal intensity patterns $|F(z)|^2$. \
In fact, we are still using for the aperture the same size, $R=35\;$mm, that is to say a diameter $\varnothing = 70\;$mm. \
Then, if $N^{\uparrow}$ is moderately increased, say for instance from $9$ to $12$, this trick will moderately enhance the
reconstruction of the pattern. Care should be taken, however, when the maximum range $L$ is augmented, because the pattern
would start distorting. Such an effect can be appreciated in Fig.\ref{fig_13} where the FW peak, near $z\approx 55$\;mm,
starts to lose amplitude. \
Another issue we like to comment about changing $N$, is the variation in the size of the spot-radius of the FW given
by Eq.(\ref{eqn_9}). \ However, in the present context this has not much concern, because all spots are below $1$\;mm. \
It is also interesting to compare the long depth of field of the generated FW, with that of a gaussian
beam\footnote{The diffraction length is given by \ $z_{\text{dif}}=\sqrt{3}k_0\frac{\Delta\rho_0^2}{2}$. \ See pp.8-9 of
Ref.\cite{hugo2008}.}, which in the best case with the current setting would be $z_{\text{dif}}\backsimeq 4$\;mm.
Because of the computer time employed by the IR simulations, the details in the corresponding spatial grids ($\rho,z$)
had to be reduced w.r.t. the ideal patterns. Then, suitable intervals of $\Delta_z=0.25\;$mm \ and \ $\Delta_{\rho}=0.15\;$mm
have been selected for the simulated plots. \ Also, due to the adoption of colors for the matlab graphs (absent, however, in the
version printed on paper), an effect was added to enhance the visibility of the smaller details of the patterns.
\begin{figure}[t]
\centering
\vspace{-2.5mm}
\includegraphics[width=3.5in]{fig10}
\caption{The theoretical FW pattern chosen in Case 2. \ Settings: $N=12$; \ $R=35\;$mm; \ $d=0.3\;$mm; \ $\Delta_d=0.05\;$mm; \
$N_r=101$; \ $L=40\;$mm; \ and \ $f_0=2.5\;$MHz.}
\label{fig_10}
\end{figure}
\begin{figure}[t]
\centering
\vspace{-2.5mm}
\includegraphics[width=3.5in]{fig11}
\caption{The FW pattern, corresponding to the previous Figure, that is, to Case $2$, obtained by our
simulated experiment. The settings are the same as in the previous Figure.}
\label{fig_11}
\end{figure}
\subsection{Case 1}
\noindent As a first choice, the FW to be reproduced consists in two step-functions with different amplitudes. The maximum
distance selected for this case is $L=30\;$mm, \ the number of the Bessel beams to be superposed being $2N+1=25$. The
spot size is approximately $2\Delta\rho_1 \backsimeq 0.72$\;mm. The corresponding envelope function $F_1(z)$ is
therefore
\begin{equation}\label{eqn_18}
F_1(z)= \begin{cases}
0.5 \ &\text{for $l_1 \leq z \leq l_2$}\\
1 \ &\text{for $l_3 \leq z \leq l_4$}\\
\end{cases}
\end{equation}
\noindent with $l_1=L/6$, \ $l_2=2L/6$, \ $l_3=3L/6$ \ and \ $l_4=5L/6$; \ the field being zero elsewhere.
The theoretical pattern is shown in Fig.\ref{fig_8}, while the IR simulation is shown in Fig.\ref{fig_9}.
Notice how the peaks and the valleys of the ideal FW (which corresponds to $N=12$) are clearly emulated by the
results of our simulated experiment depicted in Fig.\ref{fig_9}. Only in the region near $z\approx 5\;$mm the
simulation begins to deviate from the theoretical behavior.
\begin{figure}[t]
\centering
\includegraphics[width=3.5in]{fig12}
\caption{The theoretical FW pattern chosen in Case 3 (see the text). Settings: $N=11$; \ $R=35\;$mm; \ $d=0.3\;$mm; \
$\Delta_d=0.05\;$mm; \
$N_r=101$; \ $L=60\;$mm, \ and $f_0=2.5\;$MHz.}
\label{fig_12}
\end{figure}
\begin{figure}[t]
\centering
\vspace{-2.2mm}
\includegraphics[width=3.5in]{fig13}
\caption{The FW pattern, corresponding to the previous Figure, that is, to Case 3, obtained by simulated experiment.
The settings are as above.}
\label{fig_13}
\end{figure}
\subsection{Case 2}
\noindent The second pattern, selected for the FW to be created, consists in a concave-shaped region, whose envelope
function is therefore
\begin{equation}\label{eqn_19}
F_2(z)= 1 + 2.5\frac{(z-l_1)(z-l_2)}{(l_2-l_1)^2} \ \ \ \text{for $l_1 \leq z \leq l_2$}
\end{equation}
\noindent with $l_1=2L/8$ and $l_2=6L/8$. The maximum distance adopted in this case is $L=40\;$mm, and the number of the
superposed Bessel beams is again $2N+1=25$. This corresponds now to a transverse spot of $2\Delta\rho_2 \backsimeq 0.82$\;mm.
Figures \ref{fig_10} and \ref{fig_11} show the corresponding theoretical and simulated ultrasonic field, respectively.
Again, a good match is obtained between the theoretical and the simulated FW, notwithstanding the rather strange look of the
pattern.
\subsection{Case 3}
\noindent This case corresponds to the field depicted in Figures \ref{fig_12} and \ref{fig_13}. Here we propose to create
two small convex, or peaked, field regions, followed by a constant-field finite region (a ``step"); the field being assumed to be
zero elsewhere. \ The envelope function
employed this time is therefore given by (the field, let us repeat, being otherwise {\em zero\/}):
\begin{equation}\label{eqn_20}
F_3(z)= \begin{cases}
-4\frac{(z-l_1)(z-l_2)}{(l_2-l_1)^2} \ \ &\text{for $l_1 \leq z \leq l_2$}\\
-4\frac{(z-l_3)(z-l_4)}{(l_4-l_3)^2} \ \ &\text{for $l_3 \leq z \leq l_4$}\\
\ \ \ \ \ \ \ \ \ 1 \ \ &\text{for $l_5 \leq z \leq l_6$}\\
\end{cases}
\end{equation}
\noindent with $l_1=0.5L/8$, \ $l_2=1.5L/8$, \ $l_3=2L/8$, \ $l_4=3L/8$, \ $l_5=5.5L/8$ \ and \ $l_6=7.5L/8$. The
obtained spot-size is $2\Delta\rho_3 \backsimeq 0.98$\;mm.
As we stated before, we can see the effect on the second peaked region produced by augmenting the number of the beams, which
from Eq.(\ref{eqn_16}) should be now $N'=11$ instead of $N\simeq 7$. Also, the field-depth is now larger, that is, $L=60\;$mm.
\begin{figure}[t]
\centering
\includegraphics[width=3.5in]{fig14}
\caption{The theoretical FW pattern of Case 4. Settings: $N=8$; \ $R=35\;$mm; \ $d=0.3\;$mm; \ $\Delta_d=0.05\;$mm; \ $N_r=101$; \
$L=80\;$mm, \ and \ $f_0=2.5\;$MHz.}
\label{fig_14}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=3.5in]{fig15}
\caption{The FW pattern, corresponding to the previous Figure, that is, to Case 4, obtained by our
simulated experiment. The settings are as above.}
\label{fig_15}
\end{figure}
\subsection{Case 4}
\noindent The last example corresponds to an exponentially growing FW, with a maximum distance now even larger, of $L=80$\;mm.
This is a rather important example, since it is directly related to the generation of a FW in an attenuating medium. The
associated envelope function $F_4(z)$ is now
\begin{equation}
\label{eqn_21}
F_4(z)= \frac{e^{\frac{3z}{L}}}{5}\ \ \ \ \ \text{for $l_1 \leq z \leq l_2$}\\
\end{equation}
\noindent with $l_1=0.5L/8$ and $l_2=7.5L/8$.
In order to enhance the fidelity in the reconstruction of the ideal $F_4(z)$ envelope, and at the same time to reduce
the effects produced by using augmented $N^{\uparrow}$ and $L$ values, the number of the superposed Bessel beams
is now taken to be $17$ \ ($N=8$). \ The transverse spot-size this time results to be $2\Delta\rho_4 \backsimeq 1.36$\,mm.
Notice how the ultrasonic field in Fig.\ref{fig_15} simulates in a pretty good way the theoretical $3$D plot of the FW
in Fig.\ref{fig_14}. Good results of this type encourage us to generate frozen waves,
even in attenuating media.
\section{Some Conclusions and Prospects}
\noindent In this paper we have shown how adequate superpositions of zero order Bessel beams can be used for
constructing ultrasonic wavefields with a {\em static} envelope, within which only the carrier wave propagates;
for simplicity, we have here assumed a water-like medium, disregarding attenuation. \ Indeed, we have demonstrated by
simulated experiments, via the impulse response method, that suitable sets of annular apertures can
produce ultrasonic fields ``at rest'': which have been called ultrasonic {\em frozen waves} (FW) by us. Such FWs belong
to the realm of the so-called localized waves (LW), or Non-diffracting Waves, which are soliton-like solutions to the
ordinary {\em linear} wave equation which since long time have been shown theoretically and experimentally to be endowed with
peak-velocities $V$ ranging from $0$ to $\infty$ \cite{hugo2008, recami2009}.
The FWs, among the sub-luminal (or rather sub-sonic, in our case) LWs\cite{zamboni2008}, are the ones
associated\cite{zamboni2004,zamboni2005} with $V=0$.
An important characteristic of the FWs is that they can be constructed, within a prefixed spatial interval
$0 \leq z \leq L$, with any desired longitudinal intensity shape, maintaining at the same time a high transverse localization.
This has been verified through the size of the intensity {\em spots} of the generated FWs, which have almost reached the
diffraction limit $\lambda$. All this can be monitored by appropriate selection of the parameter $Q$.\\
We also pointed out how ---when adopting for instance sets of annular radiators--- a proper generation of
ultrasonic FWs with a given frequency requires careful attention to the dimensioning of the annular transducers. \
Namely, we have discussed how the transducer radii ($R$), the operating frequency ($f_0$),
and the ring dimensions (width$=d$ and kerf$=\Delta_d$) affect the generated FW fields. \ We have shown, in particular,
how a rise in frequency allows improving the fidelity of the generated FWs while
keeping the emitter radii $R$ unchanged, and just working on (augmenting) the parameter $N$; even if
such an increase of $N$ will obviously impose sharper requirements on the emitter ring-elements.
Associated to the problem of defining the ring sizes, another important issue we have pointed out is the required
sampling process of the FW patterns at the aperture: Something that one must bear in mind when designing the annular arrays
for the considered FW. \ To address this point, we explored in this article a simple constant-step sampling approach,
leaving other alternatives for future work, in which we also intend to consider pulsed excitations\cite{lu1992b,castellanos2010}
as a possible way for surmounting the known side-lobe problem, met with the use of Bessel beams for focusing ultrasonic
energy in CW (continuous wave) mode.
As a merely complementary issue, we discussed incidentally the effect of raising even more the value of
$N^{\uparrow}$ [instead of sticking to the value yielded by Eq. (\ref{eqn_16})] for getting in more detail
the ideal envelope $F(z)$ (see Section 2), neglecting for the moment the transverse-spot size changes.
To spend a few more words on the practical realization of acoustic FWs, let us recall that significant efforts
from the point of view of the hardware could be requested. We feel that the main difficulty is linked however
with proper choice, and development, of the ultrasonic transducers themselves: Namely, of transducers with
suitable dimensions and $\lambda/4$ adaptation layer(s), as well as with a low parasitic inter-element {\em cross-coupling}
(e.g., lower than $-35$\,dB). According to us, with one such transducer in the hands, acoustic FWs can be experimentally
tested. Further work and attention will have to be devoted, in any case, to ease up the mentioned technological complexities,
and costs, which in principle can be involved in the construction of the suitable CW driving electronics for
the required annular array. Without forgetting that we confined ourselves, for obvious reasons, to study the region located
between the aperture and the propagating medium, disregarding at this stage {\em possible} issues referring to the
transfer functions associated with each one of the annular electromechanical elements: Something that could have indeed
its role when aiming at an efficient implementation with pulsed HV excitations.\ In the present work, the latter problems
do not show up, in practice, since we refer to operation in the CW regime; so that all the ring elements, plus the channels of
the electronic front-end (LECOUER tm), are tuned at the resonant frequency of the array piezoelectric dye.
Many applications of the localized FWs in various sectors of science are possible. In the case of
ultrasonic FWs, let us mention for example new kinds of acoustic tweezers, bistouries, and other possible medical apparatus
for the treatment of affected tissues. With respect to the last point, we believe that FWs can offer for example
an alternative to HIFU\footnote{High-Intensity Focused Ultrasound.}, since they allow an extended control and modeling
of the longitudinal field envelope. A further advantage of acoustic FWs is that they don't have to deal
with the drawback, of relatively small treatment volume versus relatively large access window, usually
met\cite{rossing2007} by the HIFU techniques.
\section{Acknowledgments}
\addcontentsline{toc}{section}{Acknowledgment}
\noindent The authors thank Dr. Antonio Ramos, from CSIC, Spain, and Dr. Jos\'{e} J. Lunazzi, from
UNICAMP, Brazil, for their kind help and interest. They are moreover grateful to the Topical Editor
for kind attention, and to the anonymous Referees for useful comments. \ This work was supported by FAPESP, Brazil
(as well as, partially, by CAPES and CNPq, Brazil, and INFN, Italy).
\ifCLASSOPTIONcaptionsoff
\newpage
\fi
|
{
"timestamp": "2013-05-15T02:00:46",
"yymm": "1206",
"arxiv_id": "1206.5995",
"language": "en",
"url": "https://arxiv.org/abs/1206.5995"
}
|
\section{Introduction}
The inequality of Ostrowski \cite{Ostrowski} gives us an estimate for the
deviation of the values of a smooth function from its mean value. More
precisely, if $f:[a,b]\rightarrow \mathbb{R}$ is a differentiable function
with bounded derivative, then
\begin{equation*}
\left\vert f(x)-\frac{1}{b-a}\int\limits_{a}^{b}f(t)dt\right\vert \leq \left[
\frac{1}{4}+\frac{(x-\frac{a+b}{2})^{2}}{(b-a)^{2}}\right] (b-a)\left\Vert
f^{\prime }\right\Vert _{\infty }
\end{equation*}
for every $x\in \lbrack a,b]$. Moreover the constant $1/4$ is the best
possible.
For some generalizations of this classic fact see the book \cite[p.468-484]{mitrovich} by Mitrinovic, Pecaric and Fink. A simple proof of this fact can
be \ done by using the following identity \cite{mitrovich}:
If $f:[a,b]\rightarrow \mathbb{R}$ is differentiable on $[a,b]$ with the
first derivative $f^{\prime }$ integrable on $[a,b],$ then Montgomery
identity holds
\begin{equation}
f(t)=\frac{1}{b-a}\int\limits_{a}^{b}f(s)ds+\int\limits_{a}^{b}P(t,s)f^{\prime }(s)ds, \label{h}
\end{equation}
where $P(t,s)$ is the Peano kernel defined by
\begin{equation}
P(t,s):=\left\{
\begin{array}{ll}
\dfrac{s-a}{b-a}, & a\leq s<t \\
& \\
\dfrac{s-b}{b-a}, & t\leq s\leq b
\end{array}
\right. \label{hh}
\end{equation}
Suppose now that $w:\left[ a,b\right] \rightarrow \lbrack 0,\infty )$ is
some probability density function, i.e. it is a positive integrable function
satisfying $\int_{a}^{b}w\left( t\right) dt=1$,\ and $W\left( t\right)
=\int_{a}^{t}w\left( x\right) dx$ for $t\in \left[ a,b\right] $, $W\left(
t\right) =0$ for $t<a$ and $W\left( t\right) =1$ for $t>b$. The following
identity (given by Pe\v{c}ari\'{c} in \cite{pecaric}) is the weighted
generalization of the Montgomery identity
\begin{equation}
f\left( x\right) =\int\limits_{a}^{b}w\left( t\right) f\left( t\right)
dt+\int\limits_{a}^{b}P_{w}\left( x,t\right) f^{^{\prime }}\left( t\right) dt\text{,} \label{hhh}
\end{equation}
where the weighted Peano kernel is
\begin{equation*}
P_{w}(x,t):=\left\{
\begin{array}{ll}
W\left( t\right) , & a\leq t<x \\
& \\
W\left( t\right) -1, & x\leq t\leq b
\end{array}
\right.
\end{equation*}
The Riemann-Liouville fractional integral operator of order $\alpha \geq 0$
with $a\geq 0$ is defined by
\begin{eqnarray}
J_{a}^{\alpha }f(x) &=&\frac{1}{\Gamma (\alpha )}\int\limits_{a}^{x}(x-t)^{\alpha -1}f(t)dt, \label{a} \\
J_{a}^{0}f(x) &=&f(x). \notag
\end{eqnarray}
Recently, many authors have studied a number of inequalities by using the
Riemann-Liouville fractional integrals, see (\cite{Belarbi}-\cite{gorenflo},
\cite{sarikaya}, \cite{sarikaya1}) and the references cited therein. More
details, for necessary definitions and mathematical preliminaries of
fractional calculus theory, one can consult \cite{gorenflo}, \cite{samko}.
\section{Results}
\begin{theorem}
\label{3m} Let $f:\left[ a,b\right] \rightarrow \mathbb{R}$ be a
differentiable function on $\left[ a,b\right] $ such that $f^{\prime }\in
L_{p}\left[ a,b\right] $ with $\frac{1}{p}+\frac{1}{q}=1,\ p>1,$ and $\alpha
\geq 0$. Then, the following inequality holds
\begin{equation}
\left\vert \Gamma \left( \alpha +1\right) J_{a}^{\alpha }f\left( b\right)
-\left( b-a\right) ^{\alpha -1}\int\limits_{a}^{b}f\left( s\right)
ds\right\vert \leq \left( b-a\right) ^{\alpha +\frac{1}{q}}\left( \frac{1}{\left( \alpha q+1\right) ^{\frac{1}{q}}}+\frac{1}{\left( q+1\right) ^{\frac{1}{q}}}\right) \left\Vert f^{^{\prime }}\right\Vert _{p}. \label{9}
\end{equation}
\end{theorem}
\begin{proof}
We can write the Riemann-Liouville fractional integral operator as follows
\begin{equation}
\Gamma \left( \alpha \right) J_{a}^{\alpha }f\left( b\right)
=\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}f\left( t\right) dt.
\label{1}
\end{equation}
Thus, using the Montgomery identity in (\ref{1}), we have
\begin{eqnarray}
\Gamma \left( \alpha \right) J_{a}^{\alpha }f\left( b\right)
&=&\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}\left[ \frac{1}{b-a}\int\limits_{a}^{b}f\left( s\right) ds+\int\limits_{a}^{b}P\left( t,s\right)
f^{^{\prime }}\left( s\right) ds\right] dt \notag \\
&& \label{2} \\
&=&\frac{1}{b-a}\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}\left[
\int\limits_{a}^{b}f\left( s\right) ds+\int\limits_{a}^{t}\left( s-a\right)
f^{^{\prime }}\left( s\right) ds+\int\limits_{t}^{b}\left( s-b\right)
f^{^{\prime }}\left( s\right) ds\right] dt. \notag
\end{eqnarray}
By an interchange of the order of integration, we get
\begin{equation}
\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}\left(
\int\limits_{a}^{b}f\left( s\right) ds\right) dt=\frac{\left( b-a\right)
^{\alpha }}{\alpha }\int\limits_{a}^{b}f\left( s\right) ds, \label{3}
\end{equation}
\begin{equation}
\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}\left(
\int\limits_{a}^{t}\left( s-a\right) f^{^{\prime }}\left( s\right) ds\right)
dt=\frac{b-a}{\alpha }\int\limits_{a}^{b}\left( b-s\right) ^{\alpha
}f^{^{\prime }}\left( s\right) ds-\frac{1}{\alpha }\int\limits_{a}^{b}\left(
b-s\right) ^{\alpha +1}f^{^{\prime }}\left( s\right) ds, \label{4}
\end{equation}
\begin{equation}
\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}\left(
\int\limits_{t}^{b}\left( s-b\right) f^{^{\prime }}\left( s\right) ds\right)
dt=\frac{1}{\alpha }\int\limits_{a}^{b}\left( b-s\right) ^{\alpha
+1}f^{^{\prime }}\left( s\right) ds-\frac{\left( b-a\right) ^{\alpha }}{\alpha }\int\limits_{a}^{b}\left( b-s\right) f^{^{\prime }}\left( s\right)
ds. \label{5}
\end{equation}
Thus, using (\ref{3}), (\ref{4}) and (\ref{5}) in (\ref{2}) we get
\begin{eqnarray}
&&\Gamma \left( \alpha +1\right) J_{a}^{\alpha }f\left( b\right) -\left(
b-a\right) ^{\alpha -1}\int\limits_{a}^{b}f\left( s\right) ds \notag \\
&& \label{z} \\
&=&\int\limits_{a}^{b}\left( b-s\right) ^{\alpha }f^{^{\prime }}\left(
s\right) ds-\left( b-a\right) ^{\alpha -1}\int\limits_{a}^{b}\left(
b-s\right) f^{^{\prime }}\left( s\right) ds\text{, }\alpha \geq 0. \notag
\end{eqnarray}
By taking the modulus and applying the H\"{o}lder inequality, we have
\begin{eqnarray*}
&&\left\vert \Gamma \left( \alpha +1\right) J_{a}^{\alpha }f\left( b\right)
-\left( b-a\right) ^{\alpha -1}\int\limits_{a}^{b}f\left( s\right)
ds\right\vert \\
&& \\
&\leq &\left( \int\limits_{a}^{b}\left\vert f^{^{\prime }}\left( s\right)
\right\vert ^{p}ds\right) ^{\frac{1}{p}}\left( \int\limits_{a}^{b}\left(
b-s\right) ^{\alpha q}ds\right) ^{\frac{1}{q}} \\
&& \\
&&+\left( b-a\right) ^{\alpha -1}\left( \int\limits_{a}^{b}\left\vert
f^{^{\prime }}\left( s\right) \right\vert ^{p}ds\right) ^{\frac{1}{p}}\left(
\int\limits_{a}^{b}\left( b-s\right) ^{q}ds\right) ^{\frac{1}{q}} \\
&& \\
&=&\left( b-a\right) ^{\alpha +\frac{1}{q}}\left( \frac{1}{\left( \alpha
q+1\right) ^{\frac{1}{q}}}+\frac{1}{\left( q+1\right) ^{\frac{1}{q}}}\right)
\left\Vert f^{^{\prime }}\right\Vert _{p}.
\end{eqnarray*}
The proof is completed.
\end{proof}
\begin{corollary}
Under the assumptions of Theorem \ref{3m} with $\alpha =0,$\ we have
\begin{equation*}
\left\vert f\left( b\right) -\frac{1}{b-a}\int\limits_{a}^{b}f\left(
s\right) ds\right\vert \leq \left( b-a\right) ^{\frac{1}{q}}\left( 1+\frac{1}{\left( q+1\right) ^{\frac{1}{q}}}\right) \left\Vert f^{^{\prime }}\right\Vert _{p}.
\end{equation*}
\end{corollary}
\begin{theorem}
\label{2m} Let $f:\left[ a,b\right] \rightarrow \mathbb{R}$ be a
differentiable function on $\left[ a,b\right] $ and $\left\vert f^{\prime
}\left( x\right) \right\vert \leq M$, for every $x\in $ $\left[ a,b\right] $
and $\alpha \geq 0$. Then the following inequality holds
\begin{equation}
\left\vert J_{a}^{\alpha }f\left( b\right) -\frac{\left( b-a\right) ^{\alpha
-1}}{\Gamma \left( \alpha +1\right) }\int\limits_{a}^{b}f\left( s\right)
ds\right\vert \leq \frac{M\left( \alpha +3\right) \left( b-a\right) ^{\alpha
+1}}{2\Gamma \left( \alpha +2\right) }. \label{7}
\end{equation}
\end{theorem}
\begin{proof}
By using (\ref{z}), we have
\begin{eqnarray}
&&\left\vert \Gamma \left( \alpha +1\right) J_{a}^{\alpha }f\left( b\right)
-\left( b-a\right) ^{\alpha -1}\int\limits_{a}^{b}f\left( s\right)
ds\right\vert \notag \\
&& \label{8} \\
&\leq &\int\limits_{a}^{b}\left( b-s\right) ^{\alpha }\left\vert f^{^{\prime
}}\left( s\right) \right\vert ds+\left( b-a\right) ^{\alpha
-1}\int\limits_{a}^{b}\left( b-s\right) \left\vert f^{^{\prime }}\left(
s\right) \right\vert ds. \notag
\end{eqnarray}
Since $\left\vert f^{^{\prime }}\left( x\right) \right\vert \leq M,$ we get
the required inequality, and the proof is completed.
\end{proof}
\begin{corollary}
Under the assumptions of Theorem \ref{2m} with $\alpha =0,$\ we have
\begin{equation*}
\left\vert f\left( b\right) -\frac{1}{b-a}\int\limits_{a}^{b}f\left(
s\right) ds\right\vert \leq \frac{3\left( b-a\right) }{2}M.
\end{equation*}
\end{corollary}
\begin{theorem}
\label{4m} Let $w:\left[ a,b\right] \rightarrow \mathbb{[}0,\infty )$ be a
probability density function, i.e. $\int_{a}^{b}w\left( t\right) dt=1$, and
set $W\left( t\right) =\int_{a}^{t}w\left( x\right) dx$ for $a\leq t\leq b$,
$W\left( t\right) =0$ for $t<a$ and $W\left( t\right) =1$\ for $t>b$. Let $f:
\left[ a,b\right] \rightarrow \mathbb{R}$ be a differentiable function on $
\left[ a,b\right] $ such that $f^{\prime }\in L_{p}\left[ a,b\right] $ with $
\frac{1}{p}+\frac{1}{q}=1,\ p>1,$ and $\alpha \geq 0$. Then the following
inequality holds
\begin{eqnarray}
&&\left\vert \Gamma \left( \alpha +1\right) J_{a}^{\alpha }f\left( b\right)
-\left( b-a\right) ^{\alpha }\int\limits_{a}^{b}w\left( s\right) f\left(
s\right) ds\right\vert \label{16} \\
&\leq &\left\Vert f^{^{\prime }}\right\Vert _{p}\left( b-a\right) ^{\alpha }
\left[ \left( \int\limits_{a}^{b}\left\vert W(s)-1\right\vert ^{q}ds\right)
^{\frac{1}{q}}+\left( \frac{b-a}{\alpha q+1}\right) ^{\frac{1}{q}}\right] .
\notag
\end{eqnarray}
\end{theorem}
\begin{proof}
By using (\ref{hhh}) in (\ref{1}), we have
\begin{eqnarray}
\Gamma \left( \alpha \right) J_{a}^{\alpha }f\left( b\right)
&=&\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}\left[
\int\limits_{a}^{b}w\left( s\right) f\left( s\right)
ds+\int\limits_{a}^{b}P_{w}\left( t,s\right) f^{^{\prime }}\left( s\right) ds
\right] dt \notag \\
&& \notag \\
&=&\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}\left(
\int\limits_{a}^{b}w\left( s\right) f\left( s\right) ds\right) dt \label{y}
\\
&&+\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}\left(
\int\limits_{a}^{t}W\left( s\right) f^{^{\prime }}\left( s\right) ds\right)
dt \notag \\
&&+\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}\left(
\int\limits_{t}^{b}\left( W\left( s\right) -1\right) f^{^{\prime }}\left(
s\right) ds\right) dt. \notag
\end{eqnarray}
By an interchange of the order of integration, we get
\begin{equation}
\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}\left(
\int\limits_{a}^{b}w\left( s\right) f\left( s\right) ds\right) dt=\frac{
\left( b-a\right) ^{\alpha }}{\alpha }\int\limits_{a}^{b}w\left( s\right)
f\left( s\right) ds, \label{11}
\end{equation}
\begin{equation}
\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}\left(
\int\limits_{a}^{t}W\left( s\right) f^{^{\prime }}\left( s\right) ds\right)
dt=\frac{1}{\alpha }\int\limits_{a}^{b}\left( b-s\right) ^{\alpha }W\left(
s\right) f^{^{\prime }}\left( s\right) ds, \label{120}
\end{equation}
and
\begin{eqnarray}
&&\int\limits_{a}^{b}\left( b-t\right) ^{\alpha -1}\left(
\int\limits_{t}^{b}\left( W\left( s\right) -1\right) f^{^{\prime }}\left(
s\right) ds\right) dt \label{13} \\
&=&\frac{1}{\alpha }\left[ \left( b-a\right) ^{\alpha }\int\limits_{a}^{b}
\left[ W(s)-1\right] f^{^{\prime }}\left( s\right)
ds+\int\limits_{a}^{b}\left( b-s\right) ^{\alpha }f^{^{\prime }}\left(
s\right) ds\right] . \notag
\end{eqnarray}
Thus, using (\ref{11}), (\ref{120}) and (\ref{13}) in (\ref{y}) we get
\begin{eqnarray}
&&\Gamma \left( \alpha +1\right) J_{a}^{\alpha }f\left( b\right) -\left(
b-a\right) ^{\alpha }\int\limits_{a}^{b}w\left( s\right) f\left( s\right) ds
\label{z1} \\
&=&\left( b-a\right) ^{\alpha }\int\limits_{a}^{b}\left[ W(s)-1\right]
f^{^{\prime }}\left( s\right) ds+\int\limits_{a}^{b}\left( b-s\right)
^{\alpha }f^{^{\prime }}\left( s\right) ds. \notag
\end{eqnarray}
By taking the modulus and applying the H\"{o}lder inequality, we have
\begin{eqnarray*}
&&\left\vert \Gamma \left( \alpha +1\right) J_{a}^{\alpha }f\left( b\right)
-\left( b-a\right) ^{\alpha }\int\limits_{a}^{b}w\left( s\right) f\left(
s\right) ds\right\vert \\
&& \\
&\leq &\left( b-a\right) ^{\alpha }\left( \int\limits_{a}^{b}\left\vert
f^{^{\prime }}\left( s\right) \right\vert ^{p}ds\right) ^{\frac{1}{p}}\left(
\int\limits_{a}^{b}\left\vert W(s)-1\right\vert ^{q}ds\right) ^{\frac{1}{q}}
\\
&& \\
&&+\left( \int\limits_{a}^{b}\left\vert f^{^{\prime }}\left( s\right)
\right\vert ^{p}ds\right) ^{\frac{1}{p}}\left( \int\limits_{a}^{b}\left(
b-s\right) ^{\alpha q}ds\right) ^{\frac{1}{q}} \\
&& \\
&=&\left\Vert f^{^{\prime }}\right\Vert _{p}\left( b-a\right) ^{\alpha }
\left[ \left( \int\limits_{a}^{b}\left\vert W(s)-1\right\vert ^{q}ds\right)
^{\frac{1}{q}}+\left( \frac{b-a}{\alpha q+1}\right) ^{\frac{1}{q}}\right]
\end{eqnarray*}
and the proof is completed.
\end{proof}
\begin{corollary}
Under the assumptions of Theorem \ref{4m} with $\alpha =0$, we have
\begin{equation*}
\left\vert f\left( b\right) -\int\limits_{a}^{b}w\left( s\right) f\left(
s\right) ds\right\vert \leq \left[ \left( \int\limits_{a}^{b}\left\vert
W(s)-1\right\vert ^{q}ds\right) ^{\frac{1}{q}}+\left( b-a\right) ^{\frac{1}{q
}}\right] \left\Vert f^{^{\prime }}\right\Vert _{p}.
\end{equation*}
\end{corollary}
\begin{theorem}
\label{5m} Let $w:\left[ a,b\right] \rightarrow \lbrack 0,\infty )$ be a
probability density function, i.e. $\int_{a}^{b}w\left( t\right) dt=1$, and
set $W\left( t\right) =\int_{a}^{t}w\left( x\right) dx$ for $a\leq t\leq b$,
$W\left( t\right) =0$ for $t<a$ and $W\left( t\right) =1$\ for $t>b$. Let
$f:\left[ a,b\right] \rightarrow \mathbb{R}$ be a differentiable function on
$\left[ a,b\right] $ and $\left\vert f^{\prime }\left( x\right) \right\vert
\leq M$, for every $x\in \left[ a,b\right] $ and $\alpha \geq 0$. Then
the following inequality holds:
\begin{equation}
\left\vert \Gamma \left( \alpha +1\right) J_{a}^{\alpha }f\left( b\right)
-\left( b-a\right) ^{\alpha }\int\limits_{a}^{b}w\left( s\right) f\left(
s\right) ds\right\vert \leq M\left( b-a\right) ^{\alpha }\left(
\int\limits_{a}^{b}\left\vert W(s)-1\right\vert ds+\frac{b-a}{\alpha +1}
\right) . \label{14}
\end{equation}
\end{theorem}
\begin{proof}
From (\ref{z1}), we have
\begin{eqnarray}
&&\left\vert \Gamma \left( \alpha +1\right) J_{a}^{\alpha }f\left( b\right)
-\left( b-a\right) ^{\alpha }\int\limits_{a}^{b}w\left( s\right) f\left(
s\right) ds\right\vert \notag \\
&& \label{15} \\
&\leq &\left( b-a\right) ^{\alpha }\int\limits_{a}^{b}\left\vert
W(s)-1\right\vert \left\vert f^{^{\prime }}\left( s\right) \right\vert
ds+\int\limits_{a}^{b}\left( b-s\right) ^{\alpha }\left\vert f^{^{\prime
}}\left( s\right) \right\vert ds. \notag
\end{eqnarray}
By using $\left\vert f^{^{\prime }}\left( x\right) \right\vert \leq M$, the
proof is completed.
\end{proof}
|
{
"timestamp": "2012-06-27T02:04:29",
"yymm": "1206",
"arxiv_id": "1206.6002",
"language": "en",
"url": "https://arxiv.org/abs/1206.6002"
}
|
\section{$G/N \cong Sz(8)$ and $|N| = 2^{12}$}
The group $S = Sz(8)$ has exactly one irreducible $12$-dimensional module
over the field with two elements, up to isomorphism.
This module can be obtained from any of the three absolutely irreducible
$4$-dimensional $S$-modules in characteristic two,
by regarding it as a module over the prime field ${\mathbb{F}}_2$.
\begin{verbatim}
gap> p:= 2;; d:= 12;;
gap> t:= CharacterTable( "Sz(8)" ) mod p;
BrauerTable( "Sz(8)", 2 )
gap> irr:= Filtered( Irr( t ), x -> x[1] <= d );;
gap> Display( t, rec( chars:= irr, powermap:= false, centralizers:= false ) );
Sz(8)mod2
1a 5a 7a 7b 7c 13a 13b 13c
Y.1 1 1 1 1 1 1 1 1
Y.2 4 -1 A C B D F E
Y.3 4 -1 B A C E D F
Y.4 4 -1 C B A F E D
A = E(7)^2+E(7)^3+E(7)^4+E(7)^5
B = E(7)+E(7)^2+E(7)^5+E(7)^6
C = E(7)+E(7)^3+E(7)^4+E(7)^6
D = E(13)+E(13)^5+E(13)^8+E(13)^12
E = E(13)^4+E(13)^6+E(13)^7+E(13)^9
F = E(13)^2+E(13)^3+E(13)^10+E(13)^11
gap> List( irr, x -> SizeOfFieldOfDefinition( x, p ) );
[ 2, 8, 8, 8 ]
\end{verbatim}
First we construct the $12$-dimensional irreducible representation of $S$
over ${\mathbb{F}}_2$,
using that the {\textsc{Atlas}} of Group Representations provides matrix generators
for $S$ in the $4$-dimensional representation over ${\mathbb{F}}_8$.
\begin{verbatim}
gap> info:= OneAtlasGeneratingSetInfo( "Sz(8)", Dimension, 4,
> Characteristic, p );
rec( charactername := "4a", dim := 4, groupname := "Sz(8)", id := "a",
identifier := [ "Sz(8)", [ "Sz8G1-f8r4aB0.m1", "Sz8G1-f8r4aB0.m2" ], 1, 8 ],
repname := "Sz8G1-f8r4aB0", repnr := 17, ring := GF(2^3), size := 29120,
standardization := 1, type := "matff" )
gap> gens_dim4:= AtlasGenerators( info ).generators;;
gap> b:= Basis( GF(8) );;
gap> gens_dim12:= List( gens_dim4, x -> BlownUpMatrix( b, x ) );;
\end{verbatim}
We claim that any extension of $S$ with the given module splits.
\begin{verbatim}
gap> s:= AtlasGroup( "Sz(8)", IsPermGroup, true );;
gap> chr:= CHR( s, p, 0, gens_dim12 );;
gap> SecondCohomologyDimension( chr );
0
\end{verbatim}
(The function \verb|CHR| takes as its arguments a permutation group,
the characteristic of the module, a finitely presented group (or zero),
and a list of matrices that define the module in the sense that they
correspond to the generators of the given permutation group.
Note that this condition is satisfied because the generators provided by
the {\textsc{Atlas}} of Group Representations are compatible.)
So it is enough to consider the semidirect product $G = 2^{12}\!:\!Sz(8)$.
The {\textsf{GAP}} Character Table Library contains the ordinary character table
of $G$.
We check this as follows.
By the above cohomology result,
the group $G$ is uniquely determined, up to isomorphism, by the group order
and the property that $G$ has a minimal normal subgroup $N$
such that $G/N$ is a simple group isomorphic with $S$.
(Since $|G|/|S|$ is a power of two, $N$ is a $2$-group.
By the minimality condition, $N$ is elementary abelian and the action of $S$
on $N$ affords the desired $S$-module.
Note that the isomorphism type of a finite simple group is determined
by its character table.)
\begin{verbatim}
gap> iso:= IsomorphismTypeInfoFiniteSimpleGroup( s );
rec( name := "2B(2,8) = 2C(2,8) = Sz(8)", parameter := 8, series := "2B" )
gap> names:= AllCharacterTableNames( Size, 2^12 * Size( s ) );;
gap> cand:= List( names, CharacterTable );;
gap> cand:= Filtered( cand,
> t -> ForAny( ClassPositionsOfMinimalNormalSubgroups( t ),
> n -> IsomorphismTypeInfoFiniteSimpleGroup( t / n ) = iso ) );
[ CharacterTable( "2^12:Sz(8)" ) ]
\end{verbatim}
So we can easily check that $G$ has eight rational valued irreducibles
of degree $455$ (or of degree $3\,640$).
\begin{verbatim}
gap> t:= cand[1];;
gap> rationals:= Filtered( Irr( t ), x -> IsSubset( Integers, x ) );;
gap> Collected( List( rationals, x -> x[1] ) );
[ [ 1, 1 ], [ 64, 1 ], [ 91, 1 ], [ 455, 8 ], [ 3640, 8 ] ]
\end{verbatim}
\section{$G/N \cong M_{22}$ and $|N| = 2^{10}$}
The group $S = M_{22}$ has exactly two irreducible $10$-dimensional modules
over the field with two elements, up to isomorphism.
These modules are in fact absolutely irreducible.
\begin{verbatim}
gap> p:= 2;; d:= 10;;
gap> t:= CharacterTable( "M22" ) mod p;
BrauerTable( "M22", 2 )
gap> irr:= Filtered( Irr( t ), x -> x[1] <= d );;
gap> Display( t, rec( chars:= irr, powermap:= false, centralizers:= false ) );
M22mod2
1a 3a 5a 7a 7b 11a 11b
Y.1 1 1 1 1 1 1 1
Y.2 10 1 . A /A -1 -1
Y.3 10 1 . /A A -1 -1
A = E(7)+E(7)^2+E(7)^4
= (-1+Sqrt(-7))/2 = b7
gap> List( irr, x -> SizeOfFieldOfDefinition( x, p ) );
[ 2, 2, 2 ]
\end{verbatim}
First we construct the two irreducible $10$-dimensional representations
of $S$ over ${\mathbb{F}}_2$,
again using that the {\textsc{Atlas}} of Group Representations provides the matrix
generators in question.
\begin{verbatim}
gap> info:= AllAtlasGeneratingSetInfos( "M22", Dimension, d,
> Characteristic, p );
[ rec( charactername := "10a", dim := 10, groupname := "M22", id := "a",
identifier := [ "M22", [ "M22G1-f2r10aB0.m1", "M22G1-f2r10aB0.m2" ], 1,
2 ], repname := "M22G1-f2r10aB0", repnr := 13, ring := GF(2),
size := 443520, standardization := 1, type := "matff" ),
rec( charactername := "10b", dim := 10, groupname := "M22", id := "b",
identifier := [ "M22", [ "M22G1-f2r10bB0.m1", "M22G1-f2r10bB0.m2" ], 1,
2 ], repname := "M22G1-f2r10bB0", repnr := 14, ring := GF(2),
size := 443520, standardization := 1, type := "matff" ) ]
gap> gens:= List( info, r -> AtlasGenerators( r ).generators );;
\end{verbatim}
We claim that any extension of $S$ with any of the two given modules splits.
\begin{verbatim}
gap> s:= AtlasGroup( "M22", IsPermGroup, true );;
gap> chr:= CHR( s, p, 0, gens[1] );;
gap> SecondCohomologyDimension( chr );
0
gap> chr:= CHR( s, p, 0, gens[2] );;
gap> SecondCohomologyDimension( chr );
0
\end{verbatim}
Again we see that it is enough to consider semidirect products
$G = 2^{10}\!:\!M_{22}$, but this time for the two nonisomorphic modules.
The {\textsf{GAP}} Character Table Library contains the ordinary character tables
of the two groups in question.
We check this with the same approach as in the previous examples.
\begin{verbatim}
gap> iso:= IsomorphismTypeInfoFiniteSimpleGroup( s );
rec( name := "M(22)", series := "Spor" )
gap> names:= AllCharacterTableNames( Size, 2^10 * Size( s ) );;
gap> cand:= List( names, CharacterTable );;
gap> cand:= Filtered( cand,
> t -> ForAny( ClassPositionsOfMinimalNormalSubgroups( t ),
> n -> IsomorphismTypeInfoFiniteSimpleGroup( t / n ) = iso ) );
[ CharacterTable( "2^10:M22'" ), CharacterTable( "2^10:m22" ) ]
gap> List( cand, NrConjugacyClasses );
[ 47, 43 ]
\end{verbatim}
So we can easily check that in both cases,
$G$ has two rational valued irreducibles of degree $1\,155$.
\begin{verbatim}
gap> t:= cand[1];;
gap> rationals:= Filtered( Irr( t ), x -> IsSubset( Integers, x ) );;
gap> Collected( List( rationals, x -> x[1] ) );
[ [ 1, 1 ], [ 21, 1 ], [ 22, 1 ], [ 55, 1 ], [ 99, 1 ], [ 154, 1 ],
[ 210, 1 ], [ 231, 3 ], [ 385, 1 ], [ 440, 1 ], [ 770, 5 ], [ 924, 2 ],
[ 1155, 2 ], [ 1386, 1 ], [ 1408, 1 ], [ 3080, 2 ], [ 3465, 4 ],
[ 4620, 2 ], [ 6930, 3 ], [ 9240, 1 ] ]
gap> t:= cand[2];;
gap> rationals:= Filtered( Irr( t ), x -> IsSubset( Integers, x ) );;
gap> Collected( List( rationals, x -> x[1] ) );
[ [ 1, 1 ], [ 21, 1 ], [ 55, 1 ], [ 77, 1 ], [ 99, 1 ], [ 154, 1 ],
[ 210, 1 ], [ 231, 1 ], [ 330, 1 ], [ 385, 3 ], [ 616, 2 ], [ 693, 1 ],
[ 770, 1 ], [ 1155, 2 ], [ 1980, 1 ], [ 2310, 4 ], [ 2640, 1 ],
[ 3465, 2 ], [ 4620, 1 ], [ 5544, 2 ], [ 6160, 1 ], [ 6930, 2 ],
[ 9856, 1 ] ]
\end{verbatim}
\section{$G/N \cong J_2$ and $|N| = 2^{12}$}
The group $S = J_2$ has exactly one irreducible $12$-dimensional module
over the field with two elements, up to isomorphism.
This module can be obtained from any of the two absolutely irreducible
$6$-dimensional $S$-modules in characteristic two,
by regarding it as a module over the prime field ${\mathbb{F}}_2$.
\begin{verbatim}
gap> p:= 2;; d:= 12;;
gap> t:= CharacterTable( "J2" ) mod p;
BrauerTable( "J2", 2 )
gap> irr:= Filtered( Irr( t ), x -> x[1] <= d );;
gap> Display( t, rec( chars:= irr, powermap:= false, centralizers:= false ) );
J2mod2
1a 3a 3b 5a 5b 5c 5d 7a 15a 15b
Y.1 1 1 1 1 1 1 1 1 1 1
Y.2 6 -3 . A *A B *B -1 C *C
Y.3 6 -3 . *A A *B B -1 *C C
A = -2*E(5)-2*E(5)^4
= 1-Sqrt(5) = 1-r5
B = E(5)+2*E(5)^2+2*E(5)^3+E(5)^4
= (-3-Sqrt(5))/2 = -2-b5
C = E(5)+E(5)^4
= (-1+Sqrt(5))/2 = b5
gap> List( irr, x -> SizeOfFieldOfDefinition( x, p ) );
[ 2, 4, 4 ]
\end{verbatim}
First we construct the irreducible $12$-dimensional representation of $S$
over ${\mathbb{F}}_2$,
using that the {\textsc{Atlas}} of Group Representations provides matrix generators
for $S$ in the $6$-dimensional representation over ${\mathbb{F}}_4$.
\begin{verbatim}
gap> info:= OneAtlasGeneratingSetInfo( "J2", Dimension, 6,
> Characteristic, p );
rec( charactername := "6a", dim := 6, groupname := "J2", id := "a",
identifier := [ "J2", [ "J2G1-f4r6aB0.m1", "J2G1-f4r6aB0.m2" ], 1, 4 ],
repname := "J2G1-f4r6aB0", repnr := 16, ring := GF(2^2), size := 604800,
standardization := 1, type := "matff" )
gap> gens_dim6:= AtlasGenerators( info ).generators;;
gap> b:= Basis( GF(4) );;
gap> gens_dim12:= List( gens_dim6, x -> BlownUpMatrix( b, x ) );;
\end{verbatim}
We claim that any extension of $S$ with the given module splits.
\begin{verbatim}
gap> s:= AtlasGroup( "J2", IsPermGroup, true );;
gap> chr:= CHR( s, p, 0, gens_dim12 );;
gap> SecondCohomologyDimension( chr );
0
\end{verbatim}
Again we see that it is enough to consider a semidirect product
$G = 2^{12}\!:\!J_2$.
The {\textsf{GAP}} Character Table Library contains the ordinary character table
of $G$.
We check this with the same approach as in the previous examples.
\begin{verbatim}
gap> iso:= IsomorphismTypeInfoFiniteSimpleGroup( s );
rec( name := "HJ = J(2) = F(5-)", series := "Spor" )
gap> names:= AllCharacterTableNames( Size, 2^12 * Size( s ) );;
gap> cand:= List( names, CharacterTable );;
gap> cand:= Filtered( cand,
> t -> ForAny( ClassPositionsOfMinimalNormalSubgroups( t ),
> n -> IsomorphismTypeInfoFiniteSimpleGroup( t / n ) = iso ) );
[ CharacterTable( "2^12:J2" ) ]
\end{verbatim}
So we can easily check that $G$ has two rational valued irreducibles
of degree $1\,575$.
\begin{verbatim}
gap> t:= cand[1];;
gap> rationals:= Filtered( Irr( t ), x -> IsSubset( Integers, x ) );;
gap> Collected( List( rationals, x -> x[1] ) );
[ [ 1, 1 ], [ 36, 1 ], [ 63, 1 ], [ 90, 1 ], [ 126, 1 ], [ 160, 1 ],
[ 175, 1 ], [ 225, 1 ], [ 288, 1 ], [ 300, 1 ], [ 336, 1 ], [ 1575, 2 ],
[ 2520, 4 ], [ 3150, 1 ], [ 4725, 6 ], [ 9450, 1 ], [ 10080, 4 ],
[ 12600, 4 ], [ 18900, 2 ] ]
\end{verbatim}
\section{$G/N \cong J_2$ and $|N| = 5^{14}$}
The group $S = J_2$ has exactly one irreducible $14$-dimensional module
over the field with $5$ elements, up to isomorphism.
This module is in fact absolutely irreducible.
\begin{verbatim}
gap> p:= 5;; d:= 14;;
gap> t:= CharacterTable( "J2" ) mod p;
BrauerTable( "J2", 5 )
gap> irr:= Filtered( Irr( t ), x -> x[1] <= d );;
gap> Display( t, rec( chars:= irr, powermap:= false, centralizers:= false ) );
J2mod5
1a 2a 2b 3a 3b 4a 6a 6b 7a 8a 12a
Y.1 1 1 1 1 1 1 1 1 1 1 1
Y.2 14 -2 2 5 -1 2 1 -1 . . -1
\end{verbatim}
In this case, we do not attempt to compute the complete character table of
$G$.
Instead, we show that $G/N$ has at least five regular orbits on the
dual space of $N$, and apply~\cite[Lemma~5.1~(i)]{DNT}.
(Note that $N$ is in fact self-dual.)
For that, we use {\textsf{GAP}}'s table of marks of $S$.
The information stored for this table of marks allows us to compute,
for each class of subgroups $U$ of $S$, the number of orbits in the dual
space of $N$ for which the point stabilizers in $S$ are exactly
the conjugates of $U$.
The following {\textsf{GAP}} function takes the table of marks \verb|tom| of $S$,
a list \verb|matgens| of matrices that describe the action of the generators of
$S$ on the vector space in question, and the size \verb|q| of its field of scalars.
The return value is a record with the components
\verb|fixed| (the vector of numbers of fixed points of the subgroups of $S$
on the dual of $N$),
\verb|decomp| (the numbers of orbits with the corresponding point stabilizers),
\verb|nonzeropos| (the positions of subgroups that occur as point stabilizers),
and \verb|staborders| (the list of orders of the subgroups that occur as
point stabilizers).
\begin{verbatim}
gap> # Count, for each subgroup class of the table of marks, the orbits on
gap> # the dual module whose point stabilizers lie in that class.
gap> orbits_from_tom:= function( tom, matgens, q )
> local slp, fixed, idmat, i, rest, decomp, nonzeropos;
>
> slp:= StraightLineProgramsTom( tom );
> fixed:= [];
> idmat:= matgens[1]^0;
> for i in [ 1 .. Length( slp ) ] do
> if IsList( slp[i] ) then
> # Each subgroup generator has a program of its own.
> # (Evaluate the programs at the module generators 'matgens',
> # not at a global variable.)
> rest:= List( slp[i],
> prg -> ResultOfStraightLineProgram( prg, matgens ) );
> else
> # The subgroup generators are computed with one common program.
> rest:= ResultOfStraightLineProgram( slp[i], matgens );
> fi;
> if IsEmpty( rest ) then
> # The subgroup is trivial.
> fixed[i]:= q^Length( idmat );
> else
> # Compute the intersection of fixed spaces of the transposed
> # matrices, since we act on Irr(N) not on N.
> fixed[i]:= q^Length( NullspaceMat( TransposedMat( Concatenation(
> List( rest, x -> x - idmat ) ) ) ) );
> fi;
> od;
>
> decomp:= DecomposedFixedPointVector( tom, fixed );
> nonzeropos:= Filtered( [ 1 .. Length( decomp ) ],
> i -> decomp[i] <> 0 );
>
> return rec( fixed:= fixed,
> decomp:= decomp,
> nonzeropos:= nonzeropos,
> staborders:= OrdersTom( tom ){ nonzeropos },
> );
> end;;
\end{verbatim}
Note that this function assumes that the generators of $S$ obtained from
the {\textsc{Atlas}} of Group Representations are compatible with the generators
from {\textsf{GAP}}'s table of marks of $S$.
This fact can be read off from the \verb|true| value of the \verb|ATLAS| component
in the \verb|StandardGeneratorsInfo| value of the table of marks.
\begin{verbatim}
gap> tom:= TableOfMarks( "J2" );
TableOfMarks( "J2" )
gap> StandardGeneratorsInfo( tom );
[ rec( ATLAS := true, description := "|z|=10, z^5=a, |b|=3, |C(b)|=36, |ab|=7"
, generators := "a, b",
script := [ [ 1, 10, 5 ], [ 2, 3 ], [ [ 2, 1 ], [ "|C(",, ")|" ], 36 ],
[ 1, 1, 2, 1, 7 ] ], standardization := 1 ) ]
\end{verbatim}
Alternatively, we can compute whether the generators are compatible,
as follows.
\begin{verbatim}
gap> info:= OneAtlasGeneratingSetInfo( "J2", Dimension, d, Ring, GF(p) );
rec( charactername := "14a", dim := 14, groupname := "J2", id := "",
identifier := [ "J2", [ "J2G1-f5r14B0.m1", "J2G1-f5r14B0.m2" ], 1, 5 ],
repname := "J2G1-f5r14B0", repnr := 19, ring := GF(5), size := 604800,
standardization := 1, type := "matff" )
gap> gens:= AtlasGenerators( info ).generators;;
gap> map:= GroupGeneralMappingByImages( UnderlyingGroup( tom ),
> Group( gens ), GeneratorsOfGroup( UnderlyingGroup( tom ) ), gens );;
gap> IsGroupHomomorphism( map );
true
\end{verbatim}
Now we are sure that we may apply the function \verb|orbits_from_tom|.
\begin{verbatim}
gap> orbits_from_tom( tom, gens, p );
rec( decomp := [ 8600, 0, 2512, 359, 10, 0, 0, 212, 5, 0, 0, 4, 0, 240, 16,
10, 0, 0, 0, 0, 10, 0, 0, 0, 0, 2, 0, 0, 36, 0, 0, 0, 26, 0, 0, 0, 0,
0, 0, 0, 20, 0, 10, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0,
5, 0, 0, 0, 26, 0, 10, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 10, 0, 0, 0, 2, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0, 4, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 1 ],
fixed := [ 6103515625, 15625, 390625, 390625, 625, 25, 3125, 3125, 625,
625, 625, 625, 5, 3125, 125, 625, 25, 25, 125, 5, 125, 25, 125, 25, 25,
25, 5, 125, 125, 125, 25, 25, 3125, 1, 1, 5, 5, 25, 5, 25, 125, 5, 25,
25, 25, 25, 25, 25, 5, 25, 25, 5, 25, 5, 5, 5, 5, 25, 25, 1, 125, 1, 5,
5, 125, 1, 25, 5, 25, 1, 5, 25, 5, 5, 25, 25, 5, 5, 5, 1, 5, 5, 1, 1,
1, 5, 1, 25, 25, 25, 1, 5, 25, 5, 5, 1, 1, 125, 5, 5, 5, 25, 5, 5, 5,
1, 1, 5, 5, 1, 5, 1, 5, 1, 1, 25, 5, 5, 1, 1, 1, 1, 5, 1, 1, 25, 1, 1,
5, 1, 1, 5, 1, 5, 1, 1, 5, 1, 5, 1, 1, 1, 5, 1, 1, 1 ],
nonzeropos := [ 1, 3, 4, 5, 8, 9, 12, 14, 15, 16, 21, 26, 29, 33, 41, 43,
44, 58, 61, 65, 67, 72, 89, 93, 98, 99, 105, 116, 126, 139, 143, 146 ],
staborders := [ 1, 2, 3, 3, 4, 4, 5, 6, 6, 6, 8, 9, 10, 12, 12, 12, 14, 20,
24, 24, 24, 30, 48, 50, 60, 60, 72, 120, 192, 600, 1920, 604800 ] )
\end{verbatim}
We see that $S$ has $8\,600$ regular orbits on (the dual space of) $N$.
\section{$G/N \cong J_2$ and $|N| = 2^{28}$}
The group $S = J_2$ has exactly one irreducible $28$-dimensional module
over the field with two elements, up to isomorphism.
This module can be obtained from any of the two absolutely irreducible
$14$-dimensional $S$-modules in characteristic two,
by regarding it as a module over the prime field ${\mathbb{F}}_2$.
\begin{verbatim}
gap> p:= 2;; d:= 28;;
gap> t:= CharacterTable( "J2" ) mod p;
BrauerTable( "J2", 2 )
gap> irr:= Filtered( Irr( t ), x -> x[1] <= d );;
gap> Display( t, rec( chars:= irr, powermap:= false, centralizers:= false ) );
J2mod2
1a 3a 3b 5a 5b 5c 5d 7a 15a 15b
Y.1 1 1 1 1 1 1 1 1 1 1
Y.2 6 -3 . A *A C *C -1 D *D
Y.3 6 -3 . *A A *C C -1 *D D
Y.4 14 5 -1 B *B -C -*C . . .
Y.5 14 5 -1 *B B -*C -C . . .
A = -2*E(5)-2*E(5)^4
= 1-Sqrt(5) = 1-r5
B = -3*E(5)-3*E(5)^4
= (3-3*Sqrt(5))/2 = -3b5
C = E(5)+2*E(5)^2+2*E(5)^3+E(5)^4
= (-3-Sqrt(5))/2 = -2-b5
D = E(5)+E(5)^4
= (-1+Sqrt(5))/2 = b5
gap> List( irr, x -> SizeOfFieldOfDefinition( x, p ) );
[ 2, 4, 4, 4, 4 ]
\end{verbatim}
We use the same approach as in the previous example.
\begin{verbatim}
gap> tom:= TableOfMarks( "J2" );;
gap> info:= OneAtlasGeneratingSetInfo( "J2", Dimension, 14, Ring, GF(4) );;
gap> gens:= List( AtlasGenerators( info ).generators,
> x -> BlownUpMat( Basis(GF(4)), x ) );;
gap> orbits_from_tom( tom, gens, p );
rec( decomp := [ 235, 33, 282, 38, 0, 0, 6, 31, 36, 0, 0, 0, 3, 66, 9, 0, 0,
0, 0, 0, 0, 2, 18, 0, 0, 1, 0, 0, 15, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0,
12, 0, 0, 5, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 3,
0, 9, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 3, 0, 0, 0, 6, 0, 0,
0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 3, 0, 0, 0, 3, 0, 0, 1 ],
fixed := [ 268435456, 65536, 65536, 65536, 256, 1024, 4096, 1024, 1024,
256, 256, 256, 64, 1024, 64, 256, 16, 16, 64, 64, 64, 256, 256, 64, 16,
16, 64, 64, 64, 64, 16, 16, 1024, 4, 4, 4, 4, 16, 16, 16, 64, 16, 16,
16, 16, 64, 16, 16, 16, 64, 16, 16, 16, 16, 4, 16, 16, 16, 16, 1, 64,
4, 16, 4, 64, 4, 16, 4, 16, 1, 4, 16, 4, 4, 16, 16, 4, 4, 16, 1, 4, 16,
1, 1, 1, 16, 4, 16, 16, 16, 1, 4, 16, 4, 4, 1, 4, 64, 4, 4, 4, 16, 4,
4, 4, 1, 1, 4, 16, 1, 4, 1, 4, 1, 4, 16, 4, 4, 1, 1, 1, 1, 4, 1, 1, 16,
1, 1, 4, 1, 4, 4, 1, 4, 1, 1, 4, 1, 4, 1, 1, 1, 4, 1, 1, 1 ],
nonzeropos := [ 1, 2, 3, 4, 7, 8, 9, 13, 14, 15, 22, 23, 26, 29, 33, 41,
44, 46, 50, 61, 62, 63, 65, 72, 82, 93, 99, 105, 109, 116, 126, 131,
139, 143, 146 ],
staborders := [ 1, 2, 2, 3, 4, 4, 4, 6, 6, 6, 8, 8, 9, 10, 12, 12, 14, 16,
16, 24, 24, 24, 24, 30, 40, 50, 60, 72, 96, 120, 192, 240, 600, 1920,
604800 ] )
\end{verbatim}
We see that $S$ has $235$ regular orbits on (the dual space of) $N$.
\section{$G/N \cong {}^3D_4(2)$ and $|N| = 2^{26}$}
The group $S = {}^3D_4(2)$ has exactly one irreducible $26$-dimensional module
over the field with two elements, up to isomorphism.
This module is in fact absolutely irreducible.
\begin{verbatim}
gap> p:= 2;; d:= 26;;
gap> t:= CharacterTable( "3D4(2)" ) mod p;
BrauerTable( "3D4(2)", 2 )
gap> irr:= Filtered( Irr( t ), x -> x[1] <= d );;
gap> Display( t, rec( chars:= irr, powermap:= false, centralizers:= false ) );
3D4(2)mod2
1a 3a 3b 7a 7b 7c 7d 9a 9b 9c 13a 13b 13c 21a 21b 21c
Y.1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
Y.2 8 2 -1 A C B 1 D F E G I H J L K
Y.3 8 2 -1 B A C 1 E D F H G I K J L
Y.4 8 2 -1 C B A 1 F E D I H G L K J
Y.5 26 -1 -1 5 5 5 -2 2 2 2 . . . -1 -1 -1
A = 3*E(7)^2+E(7)^3+E(7)^4+3*E(7)^5
B = 3*E(7)+E(7)^2+E(7)^5+3*E(7)^6
C = E(7)+3*E(7)^3+3*E(7)^4+E(7)^6
D = -E(9)^2+E(9)^3-2*E(9)^4-2*E(9)^5+E(9)^6-E(9)^7
E = -E(9)^2+E(9)^3+E(9)^4+E(9)^5+E(9)^6-E(9)^7
F = 2*E(9)^2+E(9)^3+E(9)^4+E(9)^5+E(9)^6+2*E(9)^7
G = E(13)+E(13)^2+E(13)^3+E(13)^5+E(13)^8+E(13)^10+E(13)^11+E(13)^12
H = E(13)+E(13)^4+E(13)^5+E(13)^6+E(13)^7+E(13)^8+E(13)^9+E(13)^12
I = E(13)^2+E(13)^3+E(13)^4+E(13)^6+E(13)^7+E(13)^9+E(13)^10+E(13)^11
J = E(7)^3+E(7)^4
K = E(7)^2+E(7)^5
L = E(7)+E(7)^6
\end{verbatim}
We try the same approach as in the examples about the group $J_2$.
\begin{verbatim}
gap> tom:= TableOfMarks( "3D4(2)" );
TableOfMarks( "3D4(2)" )
gap> StandardGeneratorsInfo( tom );
[ rec( ATLAS := true, description := "|z|=8, z^4=a, |b|=9, |ab|=13, |abb|=8",
generators := "a, b",
script := [ [ 1, 8, 4 ], [ 2, 9 ], [ 1, 1, 2, 1, 13 ],
[ 1, 1, 2, 1, 2, 1, 8 ] ], standardization := 1 ) ]
gap> info:= OneAtlasGeneratingSetInfo( "3D4(2)", Dimension, 26, Ring, GF(2) );;
gap> gens:= AtlasGenerators( info ).generators;;
gap> map:= GroupGeneralMappingByImages( UnderlyingGroup( tom ),
> Group( gens ), GeneratorsOfGroup( UnderlyingGroup( tom ) ), gens );;
gap> IsGroupHomomorphism( map );
true
\end{verbatim}
Now we apply the function \verb|orbits_from_tom|.
\begin{verbatim}
gap> orbsinfo:= orbits_from_tom( tom, gens, p );;
gap> orbsinfo.fixed[1];
67108864
gap> orbsinfo.decomp[1];
0
\end{verbatim}
Unfortunately, $S$ has no regular orbit on (the dual of) $N$.
However, there is one orbit whose point stabilizer in $S$ is a dihedral group
$D_{18}$ of order $18$.
\begin{verbatim}
gap> orbsinfo.staborders;
[ 16, 16, 18, 42, 48, 52, 64, 72, 392, 1008, 1536, 3024, 3072, 3584, 258048,
211341312 ]
gap> orbsinfo.nonzeropos[3];
446
gap> orbsinfo.decomp[446];
1
gap> u:= RepresentativeTom( tom, 446 );
<permutation group of size 18 with 2 generators>
gap> IsDihedralGroup( u );
true
\end{verbatim}
Thus there is a linear character $\lambda$ of $N$ whose inertia subgroup
$T = I_G(\lambda)$ has the structure $N.D_{18}$.
Now ${\rm Irr}( T | \lambda )$ can be identified with those irreducibles of
$T/\ker(\lambda)$ that restrict nontrivially to $N/\ker(\lambda)$,
and there are only two groups, up to isomorphism, that can occur as
$T/\ker(\lambda)$.
\begin{verbatim}
gap> cand:= Filtered( AllSmallGroups( 36 ),
> x -> Size( Centre( x ) ) = 2 and
> IsDihedralGroup( x / Centre( x ) ) );
[ <pc group of size 36 with 4 generators>,
<pc group of size 36 with 4 generators> ]
gap> List( cand, StructureDescription );
[ "C9 : C4", "D36" ]
\end{verbatim}
These two groups are a split and a nonsplit extension of the cyclic group
of order $18$ with a group of order two that acts by inverting.
In other words, these two groups are the direct product of $D_{18}$ with
a cyclic group of order two and the subdirect product of $D_{18}$ with
a cyclic group of order four.
Both groups possess irreducible characters of degree two, one rational
valued and the other not, which restrict nontrivially to the centre.
\begin{verbatim}
gap> Display( CharacterTable( "Dihedral", 18 ) );
Dihedral(18)
2 1 . . . . 1
3 2 2 2 2 2 .
1a 9a 9b 3a 9c 2a
2P 1a 9b 9c 3a 9a 1a
3P 1a 3a 3a 1a 3a 2a
X.1 1 1 1 1 1 1
X.2 1 1 1 1 1 -1
X.3 2 A B -1 C .
X.4 2 B C -1 A .
X.5 2 -1 -1 2 -1 .
X.6 2 C A -1 B .
A = -E(9)^2-E(9)^4-E(9)^5-E(9)^7
B = E(9)^2+E(9)^7
C = E(9)^4+E(9)^5
\end{verbatim}
By \cite[Lemma~5.1~(ii)]{DNT}, we are done.
\section{$G/N \cong {}^3D_4(2)$ and $|N| = 3^{25}$}
The group $S = {}^3D_4(2)$ has exactly one irreducible $25$-dimensional module
over the field with three elements, up to isomorphism.
This module is in fact absolutely irreducible.
\begin{verbatim}
gap> p:= 3;; d:= 25;;
gap> t:= CharacterTable( "3D4(2)" ) mod p;
BrauerTable( "3D4(2)", 3 )
gap> irr:= Filtered( Irr( t ), x -> x[1] <= d );;
gap> Display( t, rec( chars:= irr, powermap:= false, centralizers:= false ) );
3D4(2)mod3
1a 2a 2b 4a 4b 4c 7a 7b 7c 7d 8a 8b 13a 13b 13c 14a 14b 14c 28a 28b 28c
Y.1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
Y.2 25 -7 1 5 -3 1 4 4 4 -3 -1 -1 -1 -1 -1 . . . -2 -2 -2
\end{verbatim}
We use the same approach as in the examples about the group $J_2$.
\begin{verbatim}
gap> tom:= TableOfMarks( "3D4(2)" );;
gap> info:= OneAtlasGeneratingSetInfo( "3D4(2)", Dimension, d, Ring, GF(p) );;
gap> gens:= AtlasGenerators( info ).generators;;
gap> orbsinfo:= orbits_from_tom( tom, gens, p );;
gap> orbsinfo.fixed[1];
847288609443
gap> orbsinfo.decomp[1];
3551
\end{verbatim}
We see that $S$ has $3\,551$ regular orbits on (the dual space of) $N$.
\bibliographystyle{amsalpha}
\newcommand{\etalchar}[1]{$^{#1}$}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill} }
\providecommand{\MR}{\relax\ifhmode\unskip \fi MR }
\providecommand{\MRhref}[2]{%
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
|
{
"timestamp": "2012-06-28T02:02:35",
"yymm": "1206",
"arxiv_id": "1206.6212",
"language": "en",
"url": "https://arxiv.org/abs/1206.6212"
}
|
\section{Introduction}
Since its introduction in the statistical~\citep{Tibshirani1996} and
signal processing~\citep{ChenDonoho1998} communities, the lasso has
become a fixture as both a data analysis tool \citep[for
example]{LeeZhu2010,ShiWahba2008} and as an object for deep
theoretical investigations
\citep{FuKnight2000,GreenshteinRitov2004,MeinshausenBuhlmann2006}. To
fix ideas, suppose that the observational model is of the form
\begin{equation}
Y = \mathbb{X} \theta + \sigma W,
\label{eq:fullLinearModel}
\end{equation}
where $Y=(Y_1,\ldots,Y_n)^{\top}$ is the vector of responses
and $\mathbb{X} \in \mathbb{R}^{n\times p}$ is the feature matrix, with rows
$(X_i^{\top})_{i=1}^n$, $W$ is a noise vector, and $\sigma$ is the
signal-to-noise ratio.
Under (\ref{eq:fullLinearModel}), the lasso estimator,
$\widehat \theta(\lambda)$, is defined to be
the minimizer of the following functional:
\begin{equation}
\widehat \theta(\lambda) := \argmin_{\theta} \frac{1}{2n} || Y-\mathbb{X}\theta ||_2^2 + \lambda || \theta ||_1.
\label{eq:regularized}
\end{equation}
Here, $\lambda \geq 0$ is a tuning parameter controlling the trade-off
between fidelity to the data (small $\lambda$) and sparsity (large $\lambda$).
We tacitly assume that $\mathbb{X}$ has full column rank,
and thus, $\widehat \theta(\lambda)$
is the unique minimizer.
Under conditions on the matrix $\mathbb{X}$, noise
vector $W$, and the parameter $\theta$, the optimal choice of
$\lambda$ leads to risk consistency \citep{GreenshteinRitov2004}.
However, arguably the most crucial aspect of any procedure's
performance is the selection of the tuning parameters. Typically,
theory advocating the lasso's empirical properties specifies only the
rates. That is, this theory claims ``if $\lambda=\lambda_n$ goes to
zero at the correct rate, then $\widehat{\theta}(\lambda_n)$ will be
consistent in some sense.'' For the regularized problem
in~(\ref{eq:regularized}), taking $\lambda_n=o((\log(n)/n)^{1/2})$
gives risk consistency under very general conditions. However, this
type of theoretical guidance says nothing about the properties of the
lasso when the tuning parameter is chosen using the data.
There are several proposed techniques for choosing $\lambda$, such as
minimizing the empirical risk plus a penalty term based on the
degrees of freedom \citep{ZouHastie2007,TibshiraniTaylor2012}
or using an adapted Bayesian information criterion \citep{WangLeng2007}.
In many papers, \citep[for
example]{Tibshirani1996,GreenshteinRitov2004,HastieTibshirani2009,EfronHastie2004,ZouHastie2007,Tibshirani2011,GeerLederer2011}, the recommended technique for selecting
$\lambda$ is to choose $\lambda=\widehat{\lambda}_n$ such that
$\widehat{\lambda}_n$ minimizes a cross-validation estimator of the risk.
Some results supporting the use of cross-validation for statistical
algorithms other than lasso are known. For instance, kernel regression
\citep[Theorem 8.1]{Gyorfi2002}, $k$-nearest neighbors \citep[Theorem
8.2]{Gyorfi2002}, and various classification algorithms \citep{Schaffer1993} all
behave well with tuning parameters selected using the data. Additionally,
suppose we form the adaptive ridge regression estimator \citep{Grandvalet1998}
\begin{equation}
\argmin_{\theta,(\lambda_j)} \norm{Y - \mathbb{X} \theta}_2^2 + \sum_{j=1}^p \lambda_j \theta_j^2
\label{eq:adaptiveRidge}
\end{equation}
subject to the constraint $\lambda\sum_{j=1}^p 1/\lambda_j = p$. Then the solution
to equation \eqref{eq:adaptiveRidge} is equivalent, under a reparameterization of $\lambda$,
to the solution to equation \eqref{eq:regularized}. As ridge regression has been shown to have
good asymptotic properties under (generalized) cross-validation, there is reason to believe these properties
may carry over to lasso and cross-validation using this equivalence.
However, rigorous results for the lasso have yet to be developed.
The supporting theory for other methods indicates that there should
be corresponding theory for the lasso. However, other results are not
so encouraging. In particular, \citet{Shao1993} shows that
cross-validation is inconsistent for model selection. As lasso
implicitly does model selection, and shares many connections with
forward stagewise regression \citep{EfronHastie2004}, this raises a
concerning possibility that lasso might similarly be inconsistent
under cross-validation. Likewise, \citet{LengLin2006} shows that using
prediction accuracy (which is what cross-validation estimates) as a
criterion for choosing the tuning parameter fails to recover the
sparsity pattern consistently in an orthogonal design
setting. Furthermore, \citet{XuMannor2008} show that sparsity inducing
algorithms like lasso are not (uniformly) algorithmically stable. In
other words, leave-one-out versions of the lasso estimator are not
uniformly close to each other. As shown in
\citet{BousquetElisseeff2002}, algorithmic stability is a sufficient,
but not necessary, condition for risk consistency.
These results taken as a whole leave the lasso in an unsatisfactory
position, with some theoretical results and generally accepted
practices advocating the use of cross-validation while others suggest
that it may not work. Our result partially resolves this antagonism by
showing that, in some cases, the lasso with cross-validated tuning
parameter is indeed risk consistent.
In this paper we provide a first result about the risk consistency of
lasso with the tuning parameter selected by cross-validation under
some assumptions about $\mathbb{X}$. In
Section~\ref{sec:notation-assumptions} we introduce our notation and
state our main theorem. In Section~\ref{sec:prelim-material} we state
some results necessary for our proof methods and in
Section~\ref{sec:proofs} we provide the proof. Lastly, in
Section~\ref{sec:discussion} we mention some implications of our main
theorem and some directions for future research.
\section{Notation, assumptions, and main results}
\label{sec:notation-assumptions}
The main assumptions we make for this paper ensure that the sequence $(X_i)_{i=1}^n$ is sufficiently regular.
These are
\textbf{Assumption A:}
\begin{equation}
C_n := \frac{1}{n} \sum_{i=1}^n X_i X_i^{\top} \rightarrow C,
\end{equation}
where $C$ is a positive definite matrix with $\textrm{eigen}_{\min}(C) = c_{\min} > 0$,
and
\textbf{Assumption B:}
There exists a constant $C_X < \infty$ independent of $n$ such that
\begin{equation}
\norm{X_i}_2 \leq C_X.
\end{equation}
Note that Assumption A appears repeatedly in the literature in
various contexts \citep[for
example]{Tibshirani1996,FuKnight2000,OsbornePresnell2000,LengLin2006}.
Additionally, Assumption B is effectively equivalent to assuming $\max \{ \norm{X_i}_2 : 1\leq i \leq n\} = O(1)$ as
$n \rightarrow \infty$, which is also standard \citep[for example]{ChatterjeeLahiri2011}.
We define the predictive risk and the leave-one-out cross-validation
estimator of risk to be
\begin{equation}
R_n(\lambda) := \frac{1}{n}\mathbb{E} ||\mathbb{X}(\widehat\theta(\lambda) -
\theta)||^2 + \sigma^2
=
\mathbb{E} ||\widehat\theta(\lambda) -
\theta||_{C_n}^2 + \sigma^2
\label{eq:trueRisk}
\end{equation}
and
\begin{equation}
\widehat R_n(\lambda) = \frac{1}{n} \sum_{i=1}^n (Y_i -
X_i^{\top}\hat\theta^{(i)}(\lambda))^2,
\label{eq:cvRisk}
\end{equation}
respectively. Here we are using $\hat\theta^{(i)}(\lambda)$ to
indicate the lasso estimator $\widehat\theta(\lambda)$
computed using all but the $i^{th}$ observation. Also,
we write the $\ell^2$-norm weighted by a matrix $A$ to be
$\norm{x}_A^2 = x^{\top}Ax$.
Lastly, let $\Lambda$ be a large, compact subset of $[0,\infty)$ the
specifics of which are unimportant. In practical situations, any
$\lambda \in [\max_j \widehat\theta_j(0),\infty)$ will
result in the same solution, namely $\widehat\theta_j(\lambda)=0$ for
all $j$, so any large finite upper bound is sufficient. Then define
\begin{align*}
\widehat{\lambda}_n := \argmin_{\lambda \in \Lambda} \widehat R_n(\lambda), &&
\textrm{and}
&& \lambda_n := \argmin_{\lambda \in \mathbb{R}^+} R_n(\lambda).
\end{align*}
For
$\widehat\theta(\lambda)$ to be consistent, it must hold that $\lambda
\rightarrow 0$ as $n\rightarrow\infty$. Hence, for some $N$, $n \geq
N$ implies $\lambda_n \in \Lambda \subset \mathbb{R}^+$. Therefore, without loss of
generality, we assume that $\lambda_n \in \Lambda$ for all $n$.
We spend the balance of this paper discussing and proving the
following result:
\begin{theorem}[Main Theorem]
\label{thm:mainTheorem} Suppose that Assumptions A and B hold and that
there exists a
$C_{\theta}<\infty$ such that $\norm{\theta}_1 \leq C_{\theta}$.
Also, suppose that $W_i \sim P_i$ are independently distributed and
that there exists a $\tau<\infty$ independent of $i$ such that
\[
E_{P_i}\left[e^{tW_i}\right] \leq e^{\tau^2 t^2/2}
\]
for all $t\in\mathbb{R}$. Then
\begin{equation}
R_n(\widehat{\lambda}_n) - R_n(\lambda_n) \rightarrow 0.
\label{eq:main}
\end{equation}
\end{theorem}
Essentially, this result states that under some conditions on the
design matrix $\mathbb{X}$ and the noise vector $W$, the predictive risk of
the lasso estimator with tuning parameter chosen via cross-validation
converges to the predictive risk of the lasso estimator with the
oracle tuning parameter. In other words, the typical procedure for a
data analyst is asymptotically equivalent to the optimal procedure. We
will take $\P=\prod_i P_i$ to be the $n$-fold product distribution of
the $W_i$'s and use $\mathbb{E}$ to denote the expected value
with respect to this product measure.
To prove this theorem, we show that $\sup_{\lambda \in
\Lambda}|\widehat R_n(\lambda) - R_n(\lambda)| \rightarrow 0$ in
probability. Then (\ref{eq:main}) follows as
\begin{align*}
R_n(\widehat{\lambda}_n) - R_n(\lambda_n)
& =
\left(R_n(\widehat{\lambda}_n) - \widehat R_n(\widehat{\lambda}_n)\right) +
\left(\widehat R_n(\widehat{\lambda}_n) - R_n(\lambda_n)\right) \\
& \leq
\left(R_n(\widehat{\lambda}_n) - \widehat R_n(\widehat{\lambda}_n)\right) +
\left(\widehat R_n(\lambda_n) - R_n(\lambda_n)\right) \\
& \leq
2 \sup_{\lambda\in\Lambda} \left|R_n(\lambda) - \widehat R_n(\lambda)\right| \\
& = o_\P(1).
\end{align*}
In fact, the term $R_n(\widehat{\lambda}_n) - R_n(\lambda_n)$
is non-stochastic (the expectation in the risk
integrates out the randomness in the data) and therefore convergence in
probability implies sequential convergence and hence $o_\P(1)=o(1)$.
We can write
\begin{align}
\lefteqn{|R_n(\lambda) - \hat R_n(\lambda)| }\notag\\
& =
\Bigg|\frac{1}{n}\mathbb{E} ||\mathbb{X}\hat\theta(\lambda)||_2^2 + \frac{1}{n}||\mathbb{X}\theta||_2^2 -
\frac{1}{n}2\mathbb{E}(\mathbb{X}\hat\theta(\lambda))^{\top}\mathbb{X}\theta + \sigma^2 \notag \\
& \qquad - \frac{1}{n} \sum_{i=1}^n \left(Y_i^2 +
(X_i^{\top}\hat\theta^{(i)}(\lambda))^2 - 2Y_iX_i^{\top}\hat\theta^{(i)}(\lambda)
\right)\Bigg| \notag \\
& \leq
\underbrace{\left| \frac{1}{n}\mathbb{E} ||\mathbb{X}\hat\theta(\lambda)||_2^2 - \frac{1}{n} \sum_{i=1}^n
(X_i^{\top}\hat\theta^{(i)}(\lambda))^2 \right|}_{(a)} +
\underbrace{2\left| \frac{1}{n}\mathbb{E}(\mathbb{X}\hat\theta(\lambda))^{\top}\mathbb{X}\theta - \frac{1}{n}
\sum_{i=1}^nY_iX_i^{\top}\hat\theta^{(i)}(\lambda) \right|}_{(b)} \notag\\
\label{eq:decomp}
&\qquad + \underbrace{\left| \frac{1}{n}||\mathbb{X}\theta||_2^2 + \sigma^2 - \frac{1}{n} \sum_{i=1}^n
Y_i^2\right|}_{(c)}.
\end{align}
Our proof follows by addressing $(a)$, $(b)$, and $(c)$ in lexicographic order in Section
\ref{sec:proofs}. To show that each term converges in probability to
zero uniformly in $\lambda$, we will need a few preliminary results.
\section{Preliminary material}
\label{sec:prelim-material}
In this section, we present some definitions and lemmas which are
useful for proving risk consistency of the lasso with cross-validated
tuning parameter. First, we give some results regarding the
uniform convergence of measurable functions. Next, we use these
results to show that the leave-one-out lasso estimator converges
uniformly to the full-sample lasso estimator. Finally, we present a
concentration inequality for quadratic forms of sub-Gaussian random
variables.
\subsection{Equicontinuity}
\label{sec:equicontinuity}
Our proof of Theorem \ref{thm:mainTheorem} uses a number of results
relating uniform convergence with convergence in probability. The
essential message is that particular measurable functions behave
nicely over compact sets. Mathematically, such collections of functions are called
{\em stochastically equicontinuous}.
To fix ideas, we first present the
definition of stochastic equicontinuity in the context of statistical
estimation. Suppose that we are
interested in estimating some functional of a parameter
$\beta$, $\overline{Q}_n(\beta)$,
using $\hat{Q}_n(\beta)$ where $\beta \in \mathcal{B}$.
\begin{definition}
[Stochastic equicontinuity]
If for every $\varepsilon, \eta>0$ there exists a random variable
$\Delta_n(\varepsilon,\eta)$ and constant $n_0(\varepsilon,\eta)$ such that for
$n\geq n_0(\varepsilon,\eta)$, $\P(|\Delta_n(\varepsilon,\eta)|>\varepsilon) < \eta$ and for
each $\beta \in \mathcal{B}$ there is an open set $\mathcal{N}(\beta,\varepsilon,\eta)$
containing $\beta$ such that for $n\geq n_0(\varepsilon,\eta)$,
\[
\sup_{\beta' \in \mathcal{N}(\beta,\varepsilon,\eta)} \left|
\hat{Q}_n(\beta')-\hat{Q}_n(\beta) \right| \leq
\Delta_n(\varepsilon,\eta),
\]
then we call $\{\hat{Q}_n\}$ \emph{stochastically equicontinuous over }$\mathcal{B}$.
\end{definition}
An alternative formulation of stochastic equicontinuity which is often
more useful can be found via a Lipschitz-type condition.
\begin{theorem}[Theorem 21.10 in~\citep{Davidson1994}]
\label{thm:davidsonSE}
Suppose there exists a random variable $B_n$ and a function $h$ such
that $B_n=O_\P(1)$ and for all $\beta',\beta \in \mathcal{B}$,
$|\hat{Q}_n(\beta') - \hat{Q}_n(\beta)| \leq B_n h(d(\beta',\beta))$,
where $h(x) \downarrow 0$ as $x \downarrow 0$ and $d$ is a metric on
$\mathcal{B}$. Then $\{\hat{Q}_n\}$ is stochastically equicontinuous.
\end{theorem}
The importance of stochastic equicontinuity is in showing uniform
convergence, as is expressed in the following two results.
\begin{theorem}
[Theorem 2.1 in~\citep{Newey1991}]
\label{thm:neweyUniform}
If $\mathcal{B}$ is compact, $|\hat{Q}_n(\beta) -
\overline{Q}_n(\beta)| = o_\P(1)$ for each $\beta \in \mathcal{B}$,
$\{\hat{Q}_n\}$ is stochastically equicontinuous over $\mathcal{B}$, and
$\{\overline{Q}_n\}$ is equicontinuous, then
$\sup_{\beta \in \mathcal{B}}
|\hat{Q}_n(\beta) - \overline{Q}_n(\beta)| = o_\P(1)$.
\end{theorem}
This theorem allows us to show uniform convergence of estimators
$\hat{Q}_n(\beta)$ of statistical functionals to $\overline{Q}_n(\beta)$
over compact sets $\mathcal{B}$. However, we may also be interested in
the uniform convergence of random quantities to each other. While one
could use the above theorem to show such a result, the following
theorem of \citet{Davidson1994} is often simpler.
\begin{theorem}
[\citep{Davidson1994}]
If $\mathcal{B}$ is compact, then
$ \sup_{\beta\in\mathcal{B}} G_n(\beta) = o_\P(1)$ if and only if
$G_n(\beta)=o_\P (1)$ for each
$\beta$ in a dense subset of $\mathcal{B}$ and $\{G_n(\beta)\}$ is
stochastically equicontinuous.
\label{thm:davidsonUniform}
\end{theorem}
\subsection{Uniform convergence of lasso estimators}
\label{sec:unif-conv-lasso}
Using stochastic equicontinuity, we prove two lemmas about
lasso estimators which, while intuitive, are nonetheless
novel. The first shows that the lasso estimator converges uniformly over
$\Lambda$ to its expectation. The second shows that the lasso estimator
computed using the full sample converges in probability uniformly
over $\Lambda$ to the lasso estimator computed with all but one observation.
Before stating our lemmas, we include without proof some standard results about
uniform convergence of functions.
A function $f: [a,b] \rightarrow
\mathbb{R}$ has the Luzin $N$ property if, for all $N \subset [a,b]$
that has Lebesgue measure zero, $f(N)$ has Lebesgue measure zero as
well. Also, a function $f$ is of bounded variation if and only if it
can be written as $f = f_1 - f_2$ for non-decreasing functions $f_1$
and $f_2$.
\begin{theorem}
A function $f$ is absolutely continuous if and only if it is of
bounded variation, continuous, and has the Luzin $N$
property.
\label{thm:absCont}
\end{theorem}
\begin{theorem}
If a function $f:[a,b] \rightarrow \mathbb{R}$ is absolutely
continuous, and hence differentiable almost everywhere, and satisfies
$|f'(x)| \leq C_L$ for almost all $x \in [a,b]$ with respect to
Lebesgue measure, then it is Lipschitz continuous with constant $C_L$.
\label{thm:lipschitz}
\end{theorem}
Throughout this paper, we use $C_L$ as generic notation for a Lipschitz constant; its actual value changes from line to line. The following result is useful for showing the uniform convergence of $\hat\theta(\lambda)$.
\begin{proposition}
The random function $\hat\theta(\lambda)$ is Lipschitz continuous over $\Lambda$. That is, there exists $C_L < \infty$
such that
for any $\lambda,\lambda' \in \Lambda$,
\begin{equation}
\norm{\hat\theta(\lambda) - \hat\theta(\lambda') }_2 \leq C_L |\lambda - \lambda'|.
\end{equation}
Additionally, $C_L = O(1)$ as $n \rightarrow \infty$.
\label{prop:lassoLipschitz}
\end{proposition}
\begin{proof}
The solution path of the lasso is piecewise linear over $\lambda$ with a finite number of
`kinks.' Using the notation developed in \citep[Section 3.1]{Tibshirani2013}, over each such interval, the
nonzero entries in $\hat\theta(\lambda)$ behave as a linear function with slope $n(\mathbb{X}_{\mathcal{E}}^{\top}
\mathbb{X}_{\mathcal{E}})^{-1}s_\mathcal{E}$, where $\mathcal{E} \subset
\{1,\ldots,p\}$ is the set of the indices of the active variables, $s_\mathcal{E}$ is the vector of
signs, and
$\mathbb{X}_{\mathcal{E}}$ is the feature matrix with columns restricted to the
indices in $\mathcal{E}$.
Therefore, as $ \norm{n(\mathbb{X}_{\mathcal{E}}^{\top}
\mathbb{X}_{\mathcal{E}})^{-1}s_\mathcal{E}}_2 \leq \norm{n(\mathbb{X}_{\mathcal{E}}^{\top}
\mathbb{X}_{\mathcal{E}})^{-1}}_2$,
$\hat\theta(\lambda)$ is Lipschitz continuous with
\[
C_L = \max_{\mathcal{E} \subset \{1,\ldots,p\}}
\norm{n(\mathbb{X}_{\mathcal{E}}^{\top}\mathbb{X}_{\mathcal{E}})^{-1}}_2
\]
By Assumption A, for any $\mathcal{E}$, $\frac{1}{n} \mathbb{X}_{\mathcal{E}}^{\top} \mathbb{X}_{\mathcal{E}}
\rightarrow C_{\mathcal{E}}$. Also, $\textrm{eigen}_{\min}(C_{\mathcal{E}}) \geq c_{\min}$
for any $\mathcal{E}$. Fix $\epsilon = c_{\min}/2$. Then, there exists
an $N$ such that for all $n \geq N$ and any $\mathcal{E}$,
\begin{equation}
\frac{1}{n} \textrm{eigen}_{\min} (\mathbb{X}_{\mathcal{E}}^{\top}\mathbb{X}_{\mathcal{E}}) \geq \epsilon.
\label{eq:lassoLipBound}
\end{equation}
Therefore, for $n$ large enough, $C_L \leq \frac{1}{\epsilon} < \infty$, which is independent of $n$.
\end{proof}
\begin{lemma}
For any $i = 1,\ldots,n$,
\[
\sup_{\lambda \in \Lambda} ||\hat\theta(\lambda) - \hat\theta^{(i)}(\lambda)||_2
\xrightarrow{\P} 0.
\]
\label{lem:lynchpin}
\end{lemma}
\begin{proof}
The pointwise convergence of $ ||\hat\theta(\lambda) -
\hat\theta^{(i)}(\lambda)||_2$ to zero follows by \citep[Theorem 1]{FuKnight2000}. Hence,
we invoke the consequent of Theorem~\ref{thm:davidsonUniform} as long as
$||\hat\theta(\lambda) - \hat\theta^{(i)}(\lambda)||_2$ is stochastically
equicontinuous. For this, it is sufficient to show that
$\hat\theta(\lambda)$ and $ \hat\theta^{(i)}(\lambda)$ are Lipschitz in the sense
of Theorem \ref{thm:davidsonSE}. This follows for both estimators by Proposition \ref{prop:lassoLipschitz}.
\end{proof}
\begin{lemma}
\label{lem:a1a}
For all $1\leq j\leq p$, $\{\hat\theta_j(\lambda)\}$ is stochastically equicontinuous, $\{\mathbb{E}[\hat\theta_j(\lambda)]\}$ is equicontinuous, and
$| \hat\theta_j(\lambda) - \mathbb{E}\hat\theta_j(\lambda)| = o_{\mathbb{P}}(1)$.
Thus,
\[
\sup_{\lambda\in\Lambda} | \hat\theta_j(\lambda) - \mathbb{E}\hat\theta_j(\lambda)| = o_{\mathbb{P}}(1).
\]
Furthermore,
\[
\sup_{\lambda\in\Lambda} \norm{ \hat\theta(\lambda) - \mathbb{E}\hat\theta(\lambda)}_{C_n}^2 = o_{\mathbb{P}}(1),
\]
where this notation is introduced in equation \eqref{eq:trueRisk}.
\end{lemma}
\begin{proof}
To show this claim, we use Theorem \ref{thm:neweyUniform}.
For pointwise convergence, note that $\hat\theta(\lambda)$ converges in probability to a non-stochastic limit
\citep[Theorem 1]{FuKnight2000}, call it $\theta(\lambda)$.
Also, $|\hat\theta_j(\lambda)| \leq \norm{\hat\theta(0)}_1$, which is integrable.
By the Skorohod representation theorem, there exist random variables $\hat\theta_j(\lambda)'$
such that $\hat\theta_j(\lambda)' \rightarrow \theta(\lambda)$ almost surely and $\hat\theta_j(\lambda)'$ has the same
distribution as $\hat\theta_j(\lambda)$ for each $n$.
By the dominated convergence theorem,
\[
\lim \mathbb{E} \hat\theta_j(\lambda) = \lim \mathbb{E} \hat\theta_j(\lambda)' = \mathbb{E} \theta(\lambda) = \theta(\lambda).
\]
Therefore, $|\hat\theta_j(\lambda) - \mathbb{E} \hat\theta_j(\lambda)| \rightarrow 0$ in probability.
Stochastic equicontinuity follows by Proposition
\ref{prop:lassoLipschitz} and Theorem \ref{thm:davidsonSE}.
Hence, Theorem~\ref{thm:neweyUniform} is satisfied as long as
$\{\mathbb{E}\hat\theta_j(\lambda)\}$ is equicontinuous. Observe that
the expectation and differentiation operations commute for
$\hat\theta(\lambda)$. Therefore, the result follows by Proposition \ref{prop:lassoLipschitz}.
Finally, we have
\begin{align*}
\norm{\hat\theta(\lambda)-\mathbb{E}\hat\theta(\lambda)}_{C_n}^2
& =
(\hat\theta(\lambda) - \mathbb{E}\hat\theta(\lambda))^{\top} C_n (\hat\theta(\lambda) - \mathbb{E}\hat\theta(\lambda)) \\
& \leq
\norm{\hat\theta(\lambda) - \mathbb{E}\hat\theta(\lambda)}_2 \norm{C_n (\hat\theta(\lambda) - \mathbb{E}\hat\theta(\lambda))}_2 \\
& \leq
\norm{\hat\theta(\lambda) - \mathbb{E}\hat\theta(\lambda)}_2^2 \norm{C_n}_2 \\
& =
\norm{C_n}_2 \sum_{j=1}^p | \hat\theta_j(\lambda) - \mathbb{E}\hat\theta_j(\lambda)|^2,
\end{align*}
which goes to zero uniformly, as $\norm{C_n}_2 \rightarrow \norm{C}_2< \infty$.
\end{proof}
\subsection{Concentration of measure for quadratic forms}
\label{sec:conc-quadr-forms}
Finally, we present a
special case of Theorem 1 in \citep{HsuKakade2011}
which will allow us to prove that part $(c)$ in the decomposition
converges to zero in probability.
\begin{lemma}
\label{lem:subg-quad}
Let $Z\in\mathbb{R}^n$ be a random vector with mean vector $\mu$ satisfying
\[
\mathbb{E}\left[ \exp \left(\alpha^\top (Z-\mu)\right)\right] \leq
\exp\left( \norm{\alpha}^2\tau^2/2\right)
\]
for some $\tau>0$ and all $\alpha\in \mathbb{R}^n$. Then, for all $\epsilon>0$,
\[
P\left( \left| \frac{1}{n} Z^\top Z - \norm{\mu}^2 - \tau^2\right| >
\epsilon \right) \leq 2e^{-n\epsilon^2}.
\]
\end{lemma}
\begin{proof}
This result follows from a result in \citep{HsuKakade2011} (see
also \citep{HansonWright1971}) which we have included in the appendix.
By that result with $A=I$, we have
\begin{align}
P\left( \frac{1}{n} Z^\top Z - \norm{\mu}^2 - \tau^2
> 2\sqrt{\frac{t}{n}} \left( \tau^2\left(1+2\sqrt{\frac{t}{n}}\right) +
\norm{\mu}^2\right) \right) \leq e^{-n\sqrt{\frac{t}{n}}^2}
\end{align}
Setting $\delta = \sqrt{t/n}$ and $\epsilon=2\delta \left( \tau^2\left(1+2\delta\right) +
\norm{\mu}^2\right)$, we can solve for $\delta$. The quadratic
formula gives (under the constraint $\delta>0$)
\[
\delta = \frac{\sqrt{(\tau^2 + \norm{\mu}^2)^2 +
4\tau^2\epsilon} - \tau^2 -\norm{\mu}^2} {4\tau^2} \geq \epsilon
\]
by concavity of $\sqrt{\cdot}$.
Thus, for any $\epsilon>0$,
\[
P\left( \frac{1}{n} Z^\top Z - \norm{\mu}^2 - \tau^2
> \epsilon \right) \leq e^{-n\delta^2} \leq e^{-n\epsilon^2}
\]
The same argument can be applied symmetrically. A union bound
gives the result.
\end{proof}
\section{Proofs}
\label{sec:proofs}
In this section, we address each component of the decomposition in
\eqref{eq:decomp}. Parts $(a)$ and $(b)$ follow from
uniform convergence of the lasso estimator to its expectation
(Lemma~\ref{lem:a1a}) and asymptotic equivalence of the
leave-one-out lasso estimator and the full-sample lasso estimator
(Lemma~\ref{lem:lynchpin}) while
part $(c)$ requires the sub-Gaussian concentration of measure result
in Lemma~\ref{lem:subg-quad}.
\begin{proposition}[Part $(a)$]
\label{thm:alpha}
\[
\sup_{\lambda \in \Lambda} \left|\frac{1}{n} \mathbb{E} \norm{\mathbb{X}\hat\theta(\lambda)}_2^2 - \frac{1}{n}
\sum_{i=1}^n \left(X_i^{\top}\hat\theta^{(i)}(\lambda)\right)^2 \right| =
o_{\mathbb{P}}(1).
\]
\end{proposition}
\begin{proof}
Observe
\begin{align*}
\lefteqn{\left| \frac{1}{n}\mathbb{E} ||\mathbb{X}\hat\theta(\lambda)||_2^2
- \frac{1}{n} \sum_{i=1}^n (X_i^{\top}\hat\theta^{(i)}(\lambda))^2 \right| }\notag\\
& \leq
\underbrace{\bigg|\frac{1}{n} \mathbb{E} ||\mathbb{X}\hat\theta(\lambda)||_2^2 -
\frac{1}{n} ||\mathbb{X}\hat\theta(\lambda)||_2^2 \bigg|}_{(ai)}
+
\underbrace{\bigg|\frac{1}{n}\norm{\mathbb{X}\hat\theta(\lambda)}_2^2 - \frac{1}{n} \sum_{i=1}^n
(X_i^{\top}\hat\theta^{(i)}(\lambda))^2 \bigg|}_{(aii)}
\end{align*}
For $(ai)$, note that $\mathbb{E} ||\mathbb{X}\hat\theta(\lambda)||_2^2 = \textrm{trace}(\mathbb{X}^{\top}\mathbb{X}\mathbb{V} \hat\theta(\lambda)) + \norm{\mathbb{X}\mathbb{E} \hat\theta(\lambda)}_2^2$.
Hence,
\begin{align*}
(ai)
& \leq
\left| \textrm{trace}(C_n\mathbb{V} \hat\theta(\lambda))\right| +
\frac{1}{n}\left|\norm{\mathbb{E} \mathbb{X}\hat\theta(\lambda)}_2^2 - \norm{\mathbb{X}\hat\theta(\lambda)}_2^2\right|
\\
& \leq
\norm{C_n}_F\norm{\mathbb{V} \hat\theta(\lambda)}_F +
\frac{1}{n}\left|\norm{\mathbb{E} \mathbb{X}\hat\theta(\lambda)}_2^2 - \norm{\mathbb{X}\hat\theta(\lambda)}_2^2\right|
\\
& \leq
\sigma^2\norm{C_n}_F\norm{(\mathbb{X}^\top\mathbb{X})^{-1}}_F +
\frac{1}{n}\left|\norm{\mathbb{E} \mathbb{X}\hat\theta(\lambda)}_2^2 - \norm{\mathbb{X}\hat\theta(\lambda)}_2^2\right|
\\
& =
\frac{\sigma^2}{n}\norm{C_n}_F\norm{C_n^{-1}}_F +
\norm{\hat\theta(\lambda) + \mathbb{E}\hat\theta(\lambda)}_{C_n}\norm{\hat\theta(\lambda) - \mathbb{E}\hat\theta(\lambda)}_{C_n}.
\end{align*}
This term goes to zero uniformly by Lemma~\ref{lem:a1a}. The third
inequality follows from \citep[equation 4.1]{OsbornePresnell2000}.
For $(aii)$, note that
\begin{align}
\frac{1}{n} \left |||\mathbb{X}\hat\theta(\lambda)||_2^2 -\sum_{i=1}^n
(X_i^{\top}\hat\theta^{(i)}(\lambda))^2 \right|
& =
\frac{1}{n}\left| \sum_{i=1}^n \left(
(X_i^{\top} \hat\theta(\lambda))^2
- (X_i^{\top}\hat\theta^{(i)}(\lambda))^2 \right) \right| \notag \\
& \leq
\frac{1}{n}\sum_{i=1}^n \left|
(X_i^{\top} \hat\theta(\lambda))^2
- (X_i^{\top}\hat\theta^{(i)}(\lambda))^2 \right| \notag \\
& =
\frac{1}{n}\sum_{i=1}^n \left|
X_i^{\top} \hat\theta(\lambda)\hat\theta(\lambda)^{\top}X_i
- X_i^{\top}\hat\theta^{(i)}(\lambda)\hat\theta^{(i)}(\lambda)^{\top}X_i \right| \notag \\
& =
\frac{1}{n}\sum_{i=1}^n \left|
X_i^{\top}\left( \hat\theta(\lambda)\hat\theta(\lambda)^{\top} -
\hat\theta^{(i)}(\lambda)\hat\theta^{(i)}(\lambda)^{\top} \right)X_i \right| \notag \\
&\leq\frac{1}{n}\sum_{i=1}^n \norm{X_i}_2^2
\norm{\hat\theta(\lambda)\hat\theta(\lambda)^{\top} -
\hat\theta^{(i)}(\lambda)\hat\theta^{(i)}(\lambda)^{\top}}_{F} \label{eq:maxNormDiff}.
\end{align}
The
term $||X_i||_2^2\leq C_X^2$ by Assumption B. Furthermore,
\begin{align*}
\lefteqn{\norm{\hat\theta(\lambda)\hat\theta(\lambda)^{\top} -
\hat\theta^{(i)}(\lambda)\hat\theta^{(i)}(\lambda)^{\top}}_F}\\
& = \norm{\hat\theta(\lambda)}_2^4 + \norm{\hat\theta^{(i)}(\lambda)}_2^4 -2 (\hat\theta(\lambda)^\top
\hat\theta^{(i)}(\lambda))^2\\
&= \hat\theta(\lambda)^\top \left(\hat\theta(\lambda) - \hat\theta^{(i)}(\lambda)\right) \left(\hat\theta(\lambda) +
\hat\theta^{(i)}(\lambda)\right) +\\
&\quad+ \hat\theta^{(i)}(\lambda)^\top\left(\hat\theta^{(i)}(\lambda)-\hat\theta(\lambda)\right) \left(\hat\theta^{(i)}(\lambda)+\hat\theta(\lambda)\right)\\
&\leq\left(\norm{\hat\theta(\lambda)}_2+\norm{\hat\theta^{(i)}(\lambda)}_2\right)\norm{\hat\theta(\lambda)+
\hat\theta^{(i)}(\lambda)}_2 \norm{\hat\theta(\lambda)-\hat\theta^{(i)}(\lambda)}_2\\
&\leq\left(\norm{\hat\theta(0)}_2+\norm{\hat\theta^{(i)}(0)}_2\right)\norm{\hat\theta(\lambda)+
\hat\theta^{(i)}(\lambda)}_2 \norm{\hat\theta(\lambda)-\hat\theta^{(i)}(\lambda)}_2
\end{align*}
Hence, by Lemma~\ref{lem:lynchpin}, equation \eqref{eq:maxNormDiff}
goes to zero in probability
uniformly over $\lambda \in \Lambda$.
\end{proof}
\begin{proposition}[Part $(b)$] %
\label{thm:bravo}
\[
\sup_{\lambda\in\Lambda} \left| \frac{1}{n}\mathbb{E}(\mathbb{X}\hat\theta(\lambda))^{\top}\mathbb{X}\theta -
\frac{1}{n} \sum_{i=1}^nY_iX_i^{\top}\hat\theta^{(i)}(\lambda) \right| =
o_{\mathbb{P}}(1).
\]
\end{proposition}
\begin{proof}
Observe,
\begin{align}
\sum_{i=1}^n Y_iX_i^{\top}\hat\theta^{(i)}(\lambda)
& =
\sum_{i=1}^n (X_i^{\top} \theta + \sigma W_i)(X_i^{\top}\hat\theta^{(i)}(\lambda) ) \\
& =
\sum_{i=1}^n X_i^{\top} \theta X_i^{\top}\hat\theta^{(i)}(\lambda) + \sum_{i=1}^n
\sigma W_i X_i^{\top}\hat\theta^{(i)}(\lambda).
\end{align}
So,
\begin{align*}
\lefteqn{\left| \frac{1}{n}\mathbb{E}(\mathbb{X}\hat\theta(\lambda))^{\top}\mathbb{X}\theta - \frac{1}{n}
\sum_{i=1}^nY_iX_i^{\top}\hat\theta^{(i)}(\lambda) \right| }\\
& \leq \left| \mathbb{E}\hat\theta(\lambda)^{\top}C_n\theta -
\hat\theta(\lambda)^{\top}C_n\theta \right| +
\left|\hat\theta(\lambda)^{\top}C_n\theta - \frac{1}{n}
\sum_{i=1}^n Y_iX_i^{\top}\hat\theta^{(i)}(\lambda) \right| \\
& = \left|(\mathbb{E} \hat\theta(\lambda) -
\hat\theta(\lambda))^{\top}C_n\theta \right| +
\left|\hat\theta(\lambda)^{\top}C_n\theta - \frac{1}{n}
\sum_{i=1}^n Y_iX_i^{\top}\hat\theta^{(i)}(\lambda) \right| \\
& \leq \underbrace{\norm{\mathbb{E} \hat\theta(\lambda) -
\hat\theta(\lambda)}_{C_n} \norm{\theta}_{C_n}}_{(bi)} +
\underbrace{\left| \frac{1}{n}\hat\theta(\lambda)^\top\mathbb{X}^{\top}\mathbb{X}\theta -
\frac{1}{n}\sum_{i=1}^n X_i^{\top} \theta X_i^{\top}\hat\theta^{(i)}(\lambda)
\right|}_{(bii)} + \\
& \qquad + \underbrace{\left|\frac{1}{n}\sum_{i=1}^n \sigma W_i
X_i^{\top}\hat\theta^{(i)}(\lambda) \right|}_{(biii)}.
\end{align*}
By Lemma \ref{lem:a1a}, $(bi)$ goes to zero uniformly. For
$(bii)$,
\begin{align*}
\frac{1}{n}\left| \hat\theta(\lambda)^\top\mathbb{X}^{\top}\mathbb{X}\theta - \sum_{i=1}^n
X_i^{\top} \theta X_i^{\top}\hat\theta^{(i)}(\lambda) \right|
& =
\frac{1}{n} \left| \sum_{i=1}^n \theta^{\top} X_iX_i^{\top}
\left( \hat\theta(\lambda) - \hat\theta^{(i)}(\lambda)\right) \right|\\
& \leq
\frac{1}{n}\sum_{i=1}^n \left( || \theta ||_2 \norm{X_i}_2^2
\norm{\hat\theta(\lambda) - \hat\theta^{(i)}(\lambda)}_2\right)\\
& \leq
C_{\theta} C_X^2\frac{1}{n}\sum_{i=1}^n
\norm{\hat\theta(\lambda) - \hat\theta^{(i)}(\lambda)}_2 .
\end{align*}
This goes to zero uniformly by Lemma~\ref{lem:lynchpin}.
For $(biii)$, $||\hat\theta^{(i)}(\lambda)||_1 \leq ||\hat\theta^{(i)}(0)||_1$ for any $\lambda,i$. So:
\begin{align*}
\left|\frac{1}{n}\sum_{i=1}^n \sigma W_i X_i^{\top}\hat\theta^{(i)}(\lambda)
\right|
& =
\frac{\sigma}{n}\left|\sum_{i=1}^n W_i X_i^{\top}\hat\theta^{(i)}(\lambda)
\right| \\
& \leq
\frac{\sigma}{n}\left|\sum_{i=1}^n W_i \norm{X_i}_\infty
\norm{\hat\theta^{(i)}(\lambda)}_1 \right| \\
& \leq \frac{\sigma C_X}{n}\left|\sum_{i=1}^n W_i
\norm{\hat\theta^{(i)}(0)}_1 \right| \xrightarrow{ae} 0.
\end{align*}
The proof of almost-everywhere convergence is given in the
appendix. This completes the
proof of Proposition~\ref{thm:bravo}.
\end{proof}
\begin{proposition}[Part $(c)$] %
\label{thm:charlie}
\[
\left| \frac{1}{n}||\mathbb{X}\theta||_2^2 + \sigma^2 - \frac{1}{n} \sum_{i=1}^n Y_i^2\right|= o_{\mathbb{P}}(1).
\]
\end{proposition}
\begin{proof}
By assumption, $E_{P_i}\left[e^{tW_i}\right] \leq e^{\tau^2 t^2/2}$
for all $t\in\mathbb{R}$. Thus, for any $\alpha\in\mathbb{R}^n$,
\begin{align}
\mathbb{E}\left[ \exp\left( \alpha^\top (Y-\mathbb{X}\theta)\right) \right] &=
\mathbb{E}\left[ \exp\left( \sum_{i=1}^n \alpha_i (Y_i-X_i^\top
\theta)\right) \right] \\
&= \mathbb{E}\left[ \exp\left( \sum_{i=1}^n \alpha_i W_i \right) \right]\\
&=\prod_{i=1}^n E_{P_i}\left[ \exp\left(\alpha_i W_i \right) \right] \\
& \leq \prod_{i=1}^n \exp\left(\alpha_i^2 \tau^2/2 \right)\\
&= \exp\left(\norm{\alpha}_2^2 \tau^2/2\right).
\end{align}
Therefore, we can apply Lemma \ref{lem:subg-quad} with
$\mu=\mathbb{X}\theta$.
\end{proof}
By Propositions~\ref{thm:alpha}, \ref{thm:bravo} and~\ref{thm:charlie}, each term
in \eqref{eq:decomp} converges uniformly in probability to zero thus
completing the proof of Theorem~\ref{thm:mainTheorem}.
\section{Discussion and future work}
\label{sec:discussion}
A common practice in data analysis is to estimate
the coefficients of a linear model with the lasso
and choose the regularization parameter by cross-validation.
Unfortunately, no definitive theoretical results
existed as to the effect of choosing the tuning parameter in this data-dependent
way. In this paper, we provide a solution to the
problem by demonstrating, under particular assumptions
on the design matrix, that the lasso is risk consistent
even when the tuning parameter is
selected via leave-one-out cross-validation.
However, a number of
important open questions remain. The first is to generalize to other
forms of cross-validation, especially $K$-fold. In fact, this
generalization should be possible using the methods developed
herein. Lemma~\ref{lem:lynchpin} holds when more than
one training example is held out, provided that the size of the
datasets used to form the estimators still increases to infinity with
$n$. Furthermore, with careful accounting of the held out sets,
Proposition~\ref{thm:bravo} should hold as well.
A second question is to determine whether our risk-consistency result
continues to hold in the high-dimensional setting where $p>n$. However, our methods
are not easily extensible to this setting. We rely heavily on
Assumption A which says that $n^{-1}\mathbb{X}^\top\mathbb{X}$ has a positive definite
limit as well as the related results of~\citep{FuKnight2000} which are
not available in high dimensions or with random design.
Additionally, an interesting relaxation of our results would be to assume that
the matrices $C_n$ are all non-singular, but tend to a singular limit. This would
provide a more realistic scenario where regularization is more definitively useful.
Finally, one of the main benefits of lasso is its ability to induce
sparsity and hence perform variable selection. While
selecting the correct model is far more relevant in high dimensions,
it may well be desirable in other settings as well. As mentioned in
the introduction, various authors have shown that cross-validation and
model selection are in some sense incompatible. In particular, CV
is inconsistent for model selection. Secondly, using prediction
accuracy (which is what $\hat{R}_n(\lambda)$ is estimating) as the
method for choosing $\lambda$ fails to recover the sparsity pattern
even under orthogonal design. Thus, while we show that the
predictions of the model are asymptotically equivalent to those with
the optimal tuning parameter, we should not expect to have the
correct model even if $\theta$ were sparse. In particular,
$\hat{\theta}(\lambda)$ does not necessarily converge to the OLS estimator, and
may not converge to $\theta$. We do show (Lemma~\ref{lem:a1a}) that
$\hat{\theta}(\lambda)$ converges to its expectation uniformly for all $\lambda$. While this
expectation may be sparse, it may not be. But we are unable to show
that with cross-validated tuning parameter, the lasso will select the
\emph{correct} model. While this is not surprising in light of
previous research, neither is it
comforting. The question of whether lasso with
cross-validated tuning parameter can recover an unknown sparsity
pattern remains open. Empirically, our experience is that
cross-validated tuning parameters lead
to over-parameterized estimated models, but this has yet to be
validated theoretically.
|
{
"timestamp": "2013-08-06T02:05:06",
"yymm": "1206",
"arxiv_id": "1206.6128",
"language": "en",
"url": "https://arxiv.org/abs/1206.6128"
}
|
\section{Introduction}
Let $X$ be a smooth projective variety over an algebraically closed field $k$.
Recall that an ${\mathbf R}$-divisor on $X$ is \emph{pseudo-effective} if its numerical class lies in the
closure of the set of classes of effective ${\mathbf R}$-divisors on $X$. Following \cite{Nakayama},
we define the \emph{numerical dimension} $\kappa_{\sigma}(D)$ of an ${\mathbf R}$-divisor $D$ as
the largest non-negative integer $\ell$ such that for some ample divisor $A$ one has
$$\liminf_{m\to\infty} \frac {h^0(X,\mathcal{O}_X(\lfloor mD\rfloor+A))}{m^{\ell}}>0$$
(if there is no such $\ell\geq 0$, then $\kappa_{\sigma}(D)=-\infty$). In fact, $\kappa_{\sigma}(D)
\geq 0$ if and only if $D$ is pseudo-effective.
The following is the main result of this note:
\begin{theorem}\label{thm_main}
Suppose that $X$ is a smooth projective variety over an algebraically closed field $k$ of positive characteristic. If $D$ is a pseudo-effective ${\mathbf R}$-divisor
on $X$ which is not numerically equivalent to the negative part $N_{\sigma}(D)$ in its divisorial Zariski
decomposition, then $\kappa_{\sigma}(D)\geq 1$, that is, there is an ample divisor
$A$ on $X$ and $C>0$ such that
$$h^0(X,\mathcal{O}_X(\lfloor mD\rfloor+A))\geq Cm\,\,\text{for all}\,\,m\gg 0.$$
\end{theorem}
The above theorem was proved by N.~Nakayama in \cite[Thm.~V.1.12]{Nakayama} in characteristic
zero, using the Kawamata-Viehweg vanishing theorem. That result turned out to be useful in many situations
when dealing with pseudo-effective divisors. For example, it was used in the proof of the
non-vanishing theorem, an important ingredient in proving the finite generation of the canonical ring, see \cite[Lem.~6.1]{BCHM}. We note that a large part of the results in birational geometry in characteristic zero
rely on the use of vanishing theorems. The above result illustrates
our belief that in spite of the failure of vanishing theorems in positive characteristic,
several results can still be recovered by making systematic use of the Frobenius morphism.
Let us say a few words about the divisorial Zariski decomposition that appears in the above theorem; for details, see \S 2. If $D$ is a pseudo-effective divisor, then to every prime divisor
$\Gamma$ on $X$ one can associate
a non-negative real number $\sigma_{\Gamma}(D)$ that only depends on the numerical class of $D$. It is known that there are only
finitely many prime divisors $\Gamma$ with $\sigma_{\Gamma}(D)>0$.
The \emph{negative part
in the divisorial Zariski decomposition of $D$} is
$$N_{\sigma}(D):=\sum_{\Gamma}\sigma_{\Gamma}(D)\Gamma.$$
Given an arbitrary ${\mathbf R}$-divisor $D$ on $X$, the
\emph{non-nef locus} of $D$ is the union
$${\mathbf B}_-(D):=\bigcup_A\mathbf {SB}(D+A),$$
where $A$ varies over the ample divisors $A$ on $X$ such that $D+A$ is a ${\mathbf Q}$-divisor,
and $\mathbf {SB}(E)$ denotes the stable base locus of a ${\mathbf Q}$-divisor $E$.
One can show that, in fact, ${\mathbf B}_-(D)$ is a countable union of Zariski closed subsets.
Note that
${\mathbf B}_-(D)$ is empty if and only if $D$ is nef.
If we assume that
the ground field is uncountable, then ${\mathbf B}_-(D)=X$
if and only if $D$ is not pseudo-effective. Furthermore, in this case
a prime divisor $\Gamma$ is contained in ${\mathbf B}_-(D)$ if and only if
$\sigma_{\Gamma}(D)>0$.
The assertion in Theorem~\ref{thm_main} is proved by showing that after
replacing $D$ by $D-N_{\sigma}(D)$, we can find an ample divisor $A$ such that the sections
of $\mathcal{O}_X(\lfloor mD\rfloor+A)$ can be lifted from suitable curves to $X$.
The following is another result in this direction.
\begin{theorem}\label{p_lifting}
Let $X$ be a smooth projective variety over an algebraically closed field of positive characteristic, and let $D$ be an ${\mathbf R}$-divisor on $X$. Suppose that $H=H_1+\dots+H_r$ is a simple normal crossing divisor on
$X$, with $r<\dim(X)$,
and that
$W:=H_1\cap\dots\cap H_r$ does not intersect the non-nef locus $\mathbf B_-(D)$ of $D$.
In this case there exists an ample divisor $A$ on $X$ such that the restriction map
\begin{equation*}\label{0}
H^0(X,\mathcal{O}_X(\lfloor mD\rfloor +A))\to H^0(W,\mathcal{O}_X(\lfloor mD\rfloor +A)\vert_W)
\end{equation*}
is surjective for every $m\ge 1$.
\end{theorem}
For a version of this result in characteristic zero, see \cite[Prop.~V.1.14]{Nakayama}. The proofs
of Theorems~\ref{thm_main} and \ref{p_lifting} make essential use of the Frobenius morphism.
In any characteristic, there are several possible candidates for the notion of numerical dimension
of a pseudo-effective divisor. In characteristic zero, B.~Lehmann showed in
\cite{Lehmann} that all these definitions are equivalent. We expect that a similar result
holds also in positive characteristic, but we do not pursue this direction here.
We only show, using Theorem~\ref{p_lifting}, that in the case of a nef divisor $D$, the
numerical dimension of $D$ as defined above can also be described as the largest
integer $j\geq 0$ such that the cycle class $D^j$ is not numerically trivial (this is the
definition in \cite{Kawamata}). This is done in Proposition \ref{prop.KawamataNumericalDimensionEquivalent}.
The paper is organized as follows. In the next section we collect some basic facts about
pseudo-effective divisors, non-nef loci, and divisorial Zariski decompositions.
\S 3 is devoted to proving a general vanishing result, valid in arbitrary characteristic, which
only uses
asymptotic Serre vanishing. This is then used in \S 4
to prove the two results stated above.
\subsection*{Acknowledgment}
We are grateful to J\'{a}nos Koll\'{a}r and Burt Totaro for comments on a preliminary version of the manuscript, and to
Rob Lazarsfeld for some helpful discussions.
\section{Review of basic invariants of pseudo-effective divisors}
We start by recalling some basic definitions and notation. We work over a fixed
algebraically closed field $k$. For now, we do not make any assumption on the characteristic of $k$. All varieties are assumed to be reduced and irreducible.
Let $X$ be an $n$-dimensional normal projective variety over $k$.
A divisor
(${\mathbf Q}$-divisor, ${\mathbf R}$-divisor) on $X$ is a linear combination with integer (respectively, rational
or real) coefficients of prime divisors. If $D=\sum_{i=1}^da_iD_i$ is an ${\mathbf R}$-divisor on $X$
(with the $D_i$ distinct prime divisors), then we put
$\lfloor D\rfloor:=\sum_{i=1}^d\lfloor a_i\rfloor D_i$ and $\lceil D\rceil:=\sum_{i=1}^d\lceil a_i
\rceil D_i$, where for a real number $u$, we denote by $\lfloor u\rfloor$ and $\lceil u\rceil$
the largest (respectively, smallest) integer that is $\leq u$ (respectively, $\geq u$).
We denote by ${\rm Cart}(X)$ the group of Cartier divisors on $X$.
An ${\mathbf R}$-Cartier ${\mathbf R}$-divisor on $X$ is an element of ${\rm Cart}(X)\otimes_{{\mathbf Z}}{\mathbf R}$.
We consider this real vector space as a subspace of the space of ${\mathbf R}$-divisors.
We will mostly be concerned with the case when $X$ is smooth, when every ${\mathbf R}$-divisor
is ${\mathbf R}$-Cartier.
We denote by
$\NS^1(X)_{{\mathbf R}}$ the quotient of ${\rm Pic}(X)\otimes_{{\mathbf Z}}{\mathbf R}$ by the numerical equivalence
relation. This is a finite-dimensional real vector space.
The cone of \emph{pseudo-effective} classes is the closure
of the cone generated by classes of line bundles $L$ with $h^0(X,L)\geq 1$.
Its interior is the cone of \emph{big} classes.
Suppose now that $X$ is a smooth projective variety.
If $D$ is a divisor, then we denote by ${\rm Bs}(|D|)$ the base-locus of the linear system
$|D|$, considered with the reduced scheme structure. If $D$ is a ${\mathbf Q}$-divisor, then
the \emph{stable base locus} $\mathbf {SB}(D)$ is the intersection
$\bigcap_m{\rm Bs}(|mD|)$, where $m$ varies over the positive integers such that
$mD$ has integer coefficients. This is a Zariski closed subset, and it is a consequence
of the Noetherian property that $\mathbf {SB}(D)={\rm Bs}(|mD|)$ whenever $m$ is sufficiently divisible.
The \emph{non-nef locus} of an ${\mathbf R}$-divisor $D$ is defined as
$$\B_-(D):=\bigcup_A\mathbf {SB}(D+A),$$ where $A$ varies over the
ample ${\mathbf R}$-divisors such that $D+A$ has ${\mathbf Q}$-coefficients. It is easy to see
that if $(A_m)_{m\geq 1}$ is a sequence of ample ${\mathbf R}$-divisors whose classes in $\NS^1(X)_{{\mathbf R}}$ go to zero,
and such that $D+A_m$ is a ${\mathbf Q}$-divisor for every $m$,
then $\B_-(D)=\bigcup_{m\geq 1}\mathbf {SB}(D+A_m)$. In particular,
$\B_-(D)$ is a countable union of Zariski closed subsets (it was recently shown by John Lesieutre that $\B_-(D)$ is not always closed \cite{Lesieutre}).
It follows from the definition that $\B_-(D)$ only depends on the numerical equivalence
class of $D$. Furthermore,
$\B_-(D)$ is empty if and only if $D$ is nef; if $k$ is uncountable, then
$\B_-(D)$ is a proper subset of $X$ if and only if $D$ is pseudo-effective.
For these facts about the non-nef locus and for a more detailed discussion, see
\cite{ELMNP}.
We now recall the asymptotic order of vanishing function and the divisorial Zariski decomposition, both due to Nakayama \cite{Nakayama}. For the proofs of the results
that we list below we refer to \cite[Chap. III]{Nakayama} for the case ${\rm char}(k)=0$,
and to \cite{Mustata} for the case ${\rm char}(k)>0$. Let $\Gamma$ be a prime divisor on $X$.
Suppose first that $D$ is a big ${\mathbf Q}$-divisor. Consider a positive integer $m$ such that $mD$ has integer coefficients and \mbox{$h^0(X,\mathcal{O}_X(mD))>0$.} In this case we denote by
$\ord_{\Gamma}|mD|$ the coefficient of $\Gamma$ in a general element in $|mD|$.
One defines
$$\ord_{\Gamma}\parallel D\parallel:=\inf_m\frac{\ord_{\Gamma}|mD|}{m}=
\lim_{m\to\infty}\frac{\ord_{\Gamma}|mD|}{m},$$
where $m$ is as above.
One can show that $\ord_{\Gamma}\parallel D\parallel$ only depends on the numerical
equivalence class of $D$, and the map $D\mapsto \ord_{\Gamma}\parallel D\parallel$ extends to a continuous
map (denoted in the same way) on the big cone. Furthermore, if $D$ is a pseudo-effective
${\mathbf R}$-divisor on $X$, we put
\begin{equation}\label{def_order}
\sigma_{\Gamma}(D):=
\sup_A\ord_{\Gamma}\parallel D+A\parallel,
\end{equation}
where $A$ varies over the ample ${\mathbf R}$-divisors on $X$, or equivalently,
over a sequence of ample ${\mathbf R}$-divisors whose classes in $\NS^1(X)_{{\mathbf R}}$ go to zero.
It follows from the definition that if $D'$ is a big ${\mathbf R}$-divisor and $A$ is ample, then
$$\ord_{\Gamma}\parallel D'\parallel (\Gamma\cdot A^{n-1})\leq (D'\cdot A^{n-1}).$$
We deduce that if $D$ is pseudo-effective and $A$ is a fixed ample ${\mathbf R}$-divisor, then
$$\sigma_{\Gamma}(D)\leq\lim_{\epsilon\to 0}\frac{((D+\epsilon A)\cdot A^{n-1})}
{(\Gamma\cdot A^{n-1})}=\frac{(D\cdot A^{n-1})}{(\Gamma\cdot A^{n-1})}<\infty.$$
The function
$\sigma_{\Gamma}$ is lower semi-continuous on the pseudo-effective cone,
and it agrees with $\ord_{\Gamma}\parallel-\parallel$ on the big cone.
\begin{proposition}\label{char_non_nef}
Let $D$ be a pseudo-effective ${\mathbf R}$-divisor on the smooth projective variety $X$, and let
$\Gamma$ be a prime divisor on $X$.
\begin{enumerate}
\item[i)] If $(A_m)_{m\geq 1}$ is a sequence of ample ${\mathbf R}$-divisors whose classes
in $\NS^1(X)_{{\mathbf R}}$ go to zero, and such that all
$D+A_m$ are ${\mathbf Q}$-divisors, then $\sigma_{\Gamma}(D)=0$ if and only if
$\Gamma\not\subseteq \mathbf {SB}(D+A_m)$ for every $m\geq 1$.
\item[ii)] If $k$ is uncountable, then $\sigma_{\Gamma}(D)=0$ if and only if
$\Gamma\not\subseteq\B_-(D)$.
\end{enumerate}
\end{proposition}
\begin{proof}
For the assertion in ii), see \cite[Prop. 2.8]{ELMNP} and \cite[Thm. 7.2]{Mustata}
for the cases when the ground field has characteristic zero or positive, respectively.
Note that in order to check the assertion in i), we may extend the ground field and therefore assume that it is uncountable. In this case,
i) is just a reformulation of ii).
\end{proof}
It was shown in \cite[Cor. III.1.10]{Nakayama} that if $D$ is a pseudo-effective ${\mathbf R}$-divisor on $X$ and
$\Gamma_1,\ldots,\Gamma_r$ are mutually
distinct prime divisors with $\sigma_{\Gamma_i}(D)>0$ for all $i$, then the classes
of $\Gamma_1,\ldots,\Gamma_r$ in $\NS^1(X)_{{\mathbf R}}$ are linearly independent
(the proof therein is characteristic-free).
In particular, $r$ is bounded above by $\dim_{{\mathbf R}}\NS^1(X)_{{\mathbf R}}$, hence there are only
finitely many $\Gamma$ with $\sigma_{\Gamma}(D)>0$. One defines
$$N_{\sigma}(D):=\sum_{\Gamma}\sigma_{\Gamma}(D)\Gamma,\,\,\,\,P_{\sigma}(D):=
D-N_{\sigma}(D).$$
The decomposition $D=N_{\sigma}(D)+P_{\sigma}(D)$ is known as the
\emph{divisorial Zariski decomposition} of $D$, and $N_{\sigma}(D)$ and $P_{\sigma}(D)$
are the negative and the positive part, respectively, of this decomposition. Note that
$N_{\sigma}(D)$ is an effective ${\mathbf R}$-divisor, and it only depends on the numerical
class of $D$.
\begin{proposition}\label{prop_Nakayama}
Let $D$ be a pseudo-effective ${\mathbf R}$-divisor as above, and $D=N_{\sigma}(D)+P_{\sigma}(D)$
its divisorial Zariski decomposition. For every ${\mathbf R}$-divisor $F$ with $0\leq F\leq
N_{\sigma}(D)$, the divisor $D-F$ is pseudo-effective and $N_{\sigma}(D-F)=N_{\sigma}(D)-F$.
In particular, if $A$ is an ample ${\mathbf R}$-divisor such that $P_{\sigma}(D)+A$ is a ${\mathbf Q}$-divisor,
then $\mathbf {SB}(P_{\sigma}(D)+A)$ contains no subvarieties of codimension one.
\end{proposition}
\begin{proof}
The first assertion is proved in \cite[Lem.~III.1.8]{Nakayama}, and the proof therein is independent of characteristic. This implies that for every prime divisor $\Gamma$, we have
$\sigma_{\Gamma}(P_{\sigma}(D))=0$. The second assertion now follows from Proposition~\ref{char_non_nef}.
\end{proof}
For future reference we include the following two lemmas. Both results are well-known,
but we include the proofs for the benefit of the reader.
\begin{lemma}\label{lem_num_trivial1}
If $D$ is a pseudo-effective ${\mathbf R}$-divisor on the smooth $n$-dimensional projective variety $X$
and $D$ is not numerically trivial, then for all ample ${\mathbf R}$-divisors
$A_1,\ldots,A_{n-1}$ on $X$, we have $(D\cdot A_1\cdot\ldots\cdot A_{n-1})>0$.
\end{lemma}
\begin{proof}
Since $D$ is pseudo-effective, it is clear that $(D\cdot A_1\cdot\ldots\cdot A_{n-1} )\geq 0$,
so we only need to show that this intersection number is nonzero.
This is clear if $X$ is a curve, hence from now on we assume that $n\geq 2$.
Note also that the intersection number we are interested in does not change if we extend the ground field, hence we may assume that the ground field is uncountable.
If $A'_1,\ldots,A'_{n-1}$ are ample ${\mathbf Q}$-divisors such that each $A_i-A'_i$ is ample, then
$$(D\cdot A_1\cdot\ldots\cdot A_{n-1})\geq (D\cdot A'_1\cdot\ldots\cdot A'_{n-1}).$$ Therefore we may assume that
each $A_i$ is a ${\mathbf Q}$-divisor. Furthermore, we may replace each $A_i$ by a multiple and so we may assume that $A_i$ has integer coefficients.
Since $D$ is not numerically trivial, there is an irreducible curve $C$ in $X$ such that
$(D\cdot C)\neq 0$. If $n\geq 3$, let
$\pi\colon Y\to X$ be the blow-up of $X$ along $C$, with exceptional divisor
$E$. For $m\gg 0$, the line bundle $\mathcal{O}_Y(m\pi^*(A_{n-1})-E)$ is very ample. Bertini's theorem
then implies that if $H$ is a general element of $|mA_{n-1}|$ that vanishes along $C$, then
$H$ is irreducible. If $H=r\widetilde{H}$, where $\widetilde{H}=H_{\rm red}$, then
$$(D\cdot A_1\cdot\ldots \cdot A_{n-1})=\frac{1}{m}(D\vert_H\cdot A_1\vert_H\cdot\ldots\cdot
A_{n-2}\vert_H)=
\frac{r}{m}
(D\vert_{\widetilde{H}}\cdot A_1\vert_{\widetilde{H}}\cdot\ldots\cdot
A_{n-2}\vert_{\widetilde{H}}).$$
Furthermore, if the class of $D$ is the limit of effective ${\mathbf Q}$-divisor classes $D_m$, then by taking
$H$ to be very general (meaning, chosen outside a countable union of proper closed subvarieties of $|mA_{n-1}|$), we may assume that each $D_m\vert_{\widetilde{H}}$ is effective,
hence $D\vert_{\widetilde{H}}$ is pseudo-effective. Note also that since
$C\subset \widetilde{H}$, it is clear that $D\vert_{\widetilde{H}}$ is not numerically trivial.
After iterating this argument, we find an irreducible and reduced surface $S\subseteq X$
containing $C$, such that $D\vert_S$ is a pseudo-effective ${\mathbf R}$-divisor and such that
$(D\cdot A_1\cdot\ldots\cdot A_{n-1} )$ is a positive multiple of
$(D\vert_S\cdot A_1\vert_S)$. If this is zero,
then the linear map
$D'\mapsto (D\vert_S\cdot D')$ vanishes at a point in the interior of the nef cone of $S$.
On the other hand, it is non-negative on the nef cone of $S$, since $D\vert_S$
is pseudo-effective. Therefore the map is identically zero. In order to obtain a contradiction, it is enough
to show that $(D\vert_S^2)\neq 0$.
Let $f\colon S'\to S$ be a resolution of singularities of $S$ (since $S$ is a surface, such a resolution is known to exist in arbitrary characteristic). Since $(f^*(A_1\vert_S))^2=
(A_1\vert_S^2)>0$ and $(f^*(D\vert_S)\cdot f^*(A_1\vert_S))=(D\vert_S\cdot A_1\vert_S)=0$,
it follows from the Hodge index theorem that $(f^*(D\vert_S)^2)\leq 0$, with equality if and only if
$f^*(D\vert_S)$ is numerically trivial. However, if $C'\subset S'$ is an irreducible curve that
dominates $C$, then $(f^*(D\vert_S)\cdot C')\neq 0$. Therefore
$(D\vert_S^2)<0$, which completes the proof of the lemma.
\end{proof}
\begin{lemma}\label{lem_num_trivial2}
If $D$ is a nef ${\mathbf R}$-divisor on the smooth $n$-dimensional projective variety $X$
and $0\leq j\leq n$ is such that the cycle class
$D^j$ is not numerically trivial, then for all ample ${\mathbf R}$-divisors
$A_1,\ldots,A_{n-j}$ on $X$ we have
$(D^j\cdot A_1\cdot\ldots\cdot A_{n-j})>0$.
\end{lemma}
\begin{proof}
By definition, the fact that $D^j$ is not numerically trivial means that there is a polynomial $P$ in Chern classes
of vector bundles on $X$ such that ${\rm deg}(P\cap D^j)\neq 0$. However, since $X$ is nonsingular, it follows from the Grothendieck-Riemann-Roch theorem that, in fact, we can find
an irreducible subvariety $W$ of $X$ of dimension $j$ such that $(D^j\cdot W)\neq 0$
(see \cite[Example~19.1.5]{Fulton}).
We may assume that $1\leq j\leq n-1$, as otherwise the assertion in the lemma is trivial.
Arguing as in the proof of Lemma~\ref{lem_num_trivial1}, we may assume that all $A_i$ have integer coefficients. Furthermore, we
can find an irreducible and reduced subvariety $Y$ of $X$ of dimension $j+1$ such that $W\subseteq Y$ and $(D^j\cdot A_1\cdot\ldots\cdot A_{n-j})$ is a positive multiple of
$(D\vert_Y^j\cdot A_1\vert_Y)$. If $m\gg 0$, we can find $Z$ in
$|mA_1\vert_Y|$ such that $W$ is an irreducible component of $Z$. Using the fact that $D$ is nef,
we conclude that
$$(D\vert_Y^j\cdot A_1\vert_Y)=\frac{1}{m}(D^j\vert_Z)\geq\frac{1}{m}(D\vert_W^j)>0.$$
This completes the proof of the lemma.
\end{proof}
We now recall the definition of numerical dimension. Let $X$ be a smooth, projective variety
as above, and $D$ an ${\mathbf R}$-divisor on $X$. The \emph{numerical dimension}
of $D$ is defined by
$$\kappa_\sigma(D):=\max\{\ell\in {\mathbf Z}_{\geq 0}\mid \liminf_{m\to\infty} \frac {h^0(X,\mathcal{O}_X(\lfloor mD\rfloor+A))}{m^{\ell}}>0\quad \text{for some ample divisor }A \}$$
(by convention, if the above set is empty, then $\kappa_{\sigma}(D)=-\infty$).
Note that if $A$ satisfies the condition in the definition of $\kappa_{\sigma}(D)$ for
$\ell$, then the same holds for all divisors
$A'$ such that $A'-A$ is effective. We also note that in the above definition we could replace the
round-down by the round-up function. More precisely, we have
$$\kappa_\sigma(D)=\max\{\ell\in {\mathbf Z}_{\geq 0}\mid \liminf_{m\to\infty} \frac {h^0(X,\mathcal{O}_X(\lceil mD\rceil+A))}{m^{\ell}}>0\quad \text{for some ample divisor }A \}.$$
Indeed, let $T$ be the reduced effective divisor with the same support as $D$, and let $H$ be an ample divisor such that $H-T$ is effective. For every $m\geq 1$, the difference
$T-(\lceil mD\rceil-\lfloor mD\rfloor)$ is effective, hence
$H-(\lceil mD\rceil-\lfloor mD\rfloor)$ is effective. Therefore for every ample divisor $A$ we have
$$h^0(X,\mathcal{O}_X(\lceil mD\rceil + A))\le h^0(X,\mathcal{O}_X(\lfloor mD\rfloor + A+H)).$$
This proves our assertion.
We will see in Proposition~\ref{char_pseudoeffective} below that
$D$ is pseudo-effective if and only if $\kappa_{\sigma}(D)\geq 0$.
It is also easy to see that $\kappa_{\sigma}(D)=\dim(X)$ if and only if $D$ is big.
\begin{remark}
In \cite{Nakayama}, variants of the above definition were introduced. For example,
the limit inferior was replaced by limit superior, or the condition
$\liminf>0$ was replaced by $\limsup<\infty$. It was shown in \cite{Lehmann} that in characteristic zero
all these variants give the same invariant, which also admits other characterizations in terms of volumes or positive intersection products. We expect that such a result should also hold in
positive characteristic, but we do not pursue this here.
\end{remark}
In this paper we are mainly concerned with smooth varieties. However, we now briefly discuss the
numerical dimension of ${\mathbf R}$-Cartier ${\mathbf R}$-divisors on normal varieties. In particular, we show that it can be computed as
the numerical dimension of the pull-back to any smooth alteration.
\begin{remark}\label{non_smooth1}
If $D$ is an ${\mathbf R}$-Cartier ${\mathbf R}$-divisor on a normal projective variety $X$, then we can still define the numerical dimension $\kappa_{\sigma}(D)$ by the same formula as above.
In this case, however, it is convenient to also use an alternative description, as follows. Consider
a sequence of Cartier divisors $(D_m)_{m\geq 1}$ on $X$, with the following property:
there are finitely many Cartier divisors $T_1,\ldots,T_r$ and $M>0$
such that we can write $D=\sum_{i=1}^rq_iT_i$ and $D_m=\sum_{i=1}^rq_{m,i}T_i$,
with $q_i\in{\mathbf R}$ and $q_{m,i}\in{\mathbf Z}$
such that $|mq_i-q_{m,i}|\leq M$ for every $i$ and $m$.
We can always find such a sequence: there is such an expression for $D$ since $D$
is ${\mathbf R}$-Cartier, and we can take $D_m=\sum_{i=1}^rq_{m,i}T_i$, with
$q_{m,i}=\lfloor mq_i\rfloor$ or $q_{m,i}=\lceil mq_i\rceil$.
Given \emph{any} sequence
$(D_m)_{m\geq 1}$ as above,
$$\kappa_{\sigma}(D)=\max\{\ell\in {\mathbf Z}_{\geq 0}\mid \liminf_{m\to\infty} \frac {h^0(X,\mathcal{O}_X(D_m+A))}{m^{\ell}}>0\, \text{for some ample Cartier divisor }A \}.$$
The equality is a consequence of the fact that under our assumptions on $(D_m)_{m\geq 1}$,
all divisors $D_m-\lfloor mD\rfloor$ are supported on a finite set of prime divisors, and their coefficients are bounded. Therefore
we can find an ample Cartier divisor $H$ such that we have $h^0(X,\mathcal{O}_X(H+D_m-\lfloor mD\rfloor))\geq 1$ and $h^0(X,\mathcal{O}_X(H-D_m+\lfloor mD\rfloor))\geq 1$ for all $m\geq 1$.
\end{remark}
Recall that an alteration $\pi \colon Y \to X$ is a projective, surjective, and generically finite morphism. The existence of alterations with smooth total space is guaranteed by a theorem of de Jong \cite{deJong}.
\begin{proposition}\label{alteration}
Let $X$ be a normal projective variety and $D$ an ${\mathbf R}$-Cartier ${\mathbf R}$-divisor on $X$. If
$\pi\colon Y\to X$ is an alteration, with $Y$ normal, then
$$\kappa_{\sigma}(D)=\kappa_{\sigma}(\pi^*(D)).$$
\end{proposition}
\begin{proof}
Let us write $D=\sum_{i=1}^rq_iT_i$, where each $T_i$ is a Cartier divisor and $q_i\in{\mathbf R}$,
and put $D_m=\sum_{i=1}^r\lfloor mq_i\rfloor T_i$ for $m\geq 1$. It follows from
Remark~\ref{non_smooth1} that we can use the sequences $(D_m)_{m\geq 1}$
and $(\pi^*(D_m))_{m\geq 1}$ to compute $\kappa_{\sigma}(D)$ and
$\kappa_{\sigma}(\pi^*(D))$, respectively.
If $A$ is an ample Cartier divisor on $X$ and $B$ is a Cartier divisor on $Y$ such that
$B-\pi^*(A)$ is ample, then we have
$$h^0(Y,\mathcal{O}_Y(\pi^*(D_m)+B))\geq h^0(X,\mathcal{O}_X(D_m+A))\,\text{for every}\,m\geq 1,$$
hence $\kappa_{\sigma}(\pi^*(D))\geq\kappa_{\sigma}(D)$. In order to prove the opposite inequality, suppose that $H_Y$ is an ample divisor on $Y$ such that
$h^0(Y, \mathcal{O}_Y(\pi^*(D_m)+H_Y))\geq Cm^{\ell}$ for some $C>0$ and all $m\gg 0$.
If $H$ is any ample Cartier divisor on $X$, then $\pi^*(H)$ is big on $Y$, hence we can find
$d>0$ such that $h^0(Y,\mathcal{O}_Y(d\pi^*(H)-H_Y))\geq 1$, hence
$$h^0(X,\mathcal{O}_X(D_m+dH)\otimes\pi_*(\mathcal{O}_Y))=h^0(Y, \mathcal{O}_Y(\pi^*(D_m+dH)))\geq Cm^{\ell}\,\,\text{for all}\,\,m\gg 0.$$
Note now that for a suitable $d'>0$, we can find an embedding
$\pi_*(\mathcal{O}_Y)\hookrightarrow \mathcal{O}_X(d'H)^{\oplus N}$ for a positive integer $N$. Indeed,
let $d'\gg 0$ be such that $\pi_*(\mathcal{O}_Y)^{\vee}\otimes\mathcal{O}_X(d'H)$ is generated by global sections (for a sheaf ${\mathcal F}$, we denote by ${\mathcal F}^{\vee}$ its dual
${\mathcal Hom}_{\mathcal{O}_X}({\mathcal F}, {\mathcal O}_X)$). We thus have a surjection
$\mathcal{O}_X(-d'H)^{\oplus N}\to \pi_*(\mathcal{O}_Y)^{\vee}$, which induces an embedding
$$\pi_*(\mathcal{O}_Y)\hookrightarrow(\pi_*(\mathcal{O}_Y)^{\vee})^{\vee}\hookrightarrow\mathcal{O}_X(d'H)^{\oplus N}.$$ We deduce that for $m\gg 0$, we have
$h^0(X,\mathcal{O}_X(D_m+(d+d')H))\geq \frac{C}{N}m^{\ell}$, hence
$\kappa_{\sigma}(D)\geq \ell$. This completes the proof of the proposition.
\end{proof}
We conclude this section with the following characterization of pseudo-effective divisors.
\begin{proposition}\label{char_pseudoeffective}
If $X$ is a smooth projective variety, then there is a divisor $G$
on $X$ such that an ${\mathbf R}$-divisor $D$ on $X$ is pseudo-effective if and only if
$h^0(X,\mathcal{O}_X(\lceil mD\rceil+G))>0$ for every $m\geq 1$. In particular, $D$
is pseudo-effective if and only if $\kappa_{\sigma}(D)\geq 0$.
\end{proposition}
\begin{proof}
The assertion is well-known when the ground field has characteristic zero (see
\cite[Cor.~V.1.4]{Nakayama}), so we only give the argument when the ground field has positive characteristic. Let $H$ be a very ample divisor. We show that
$G=K_X+(n+2)H$ has the required property, where $K_X$ is such that $\mathcal{O}_X(K_X)\simeq\omega_X$ and
$n=\dim(X)$.
Suppose first that $D$ is pseudo-effective.
Note that $\lceil mD\rceil -mD$ is effective, hence $\lceil mD\rceil$ is pseudo-effective,
and therefore $E=\lceil mD\rceil +H$ is a big divisor. It now follows from
\cite[Thm.~4.1]{Mustata} that
$$\tau(\parallel E\parallel)\otimes_{\mathcal{O}_X}\mathcal{O}_X(K_X+E+(n+1)H)$$
is globally generated, where $\tau(\parallel E\parallel)$ is the asymptotic test ideal of $E$.
The important thing for us is that $\tau(\parallel E\parallel)$ is nonzero, which implies
$h^0(X,\mathcal{O}_X(\lceil mD\rceil+G))>0$. Note that one can give a similar argument in characteristic zero, by replacing the asymptotic test ideal by the asymptotic multiplier ideal, and using
the corresponding global generation result (see \cite[Cor.~11.2.13]{positivity}).
The converse is clear (in fact, any $G$ satisfies this direction). Indeed, if we have
$h^0(X,\mathcal{O}_X(\lceil mD\rceil+G))>0$ for every $m\geq 1$, then each
$\frac{1}{m}\lceil mD\rceil +\frac{1}{m}G$ is pseudo-effective. Therefore the limit $D$
of these divisors is pseudo-effective as well.
The last assertion in the proposition is now a consequence of the first one, and of the second description of
$\kappa_{\sigma}(D)$ (the one using $\lceil mD\rceil$ instead of $\lfloor mD\rfloor$).
\end{proof}
\begin{remark}\label{non_smooth2}
The characterization of pseudo-effective divisors in terms of their numerical dimension,
given in Proposition~\ref{char_pseudoeffective}, also holds for singular varieties. More precisely, if $D$ is an ${\mathbf R}$-Cartier
${\mathbf R}$-divisor on the normal projective variety $X$, then $\kappa_{\sigma}(D)\geq 0$ if and only if
$D$ is pseudo-effective. Indeed, suppose that $D$ is pseudo-effective and consider an alteration
$\pi\colon Y\to X$, with $Y$ smooth. Since $\pi^*(D)$ is pseudo-effective, we have
$\kappa_{\sigma}(\pi^*(D))\geq 0$ by Proposition~\ref{char_pseudoeffective},
and since $\kappa_{\sigma}(\pi^*(D))=
\kappa_{\sigma}(D)$ by Proposition~\ref{alteration}, we conclude that $\kappa_{\sigma}(D)\geq 0$.
Conversely, suppose that $\kappa_{\sigma}(D)\geq 0$, and let $(D_m)_{m\geq 1}$ be a sequence of Cartier divisors on $X$ as in Remark~\ref{non_smooth1}. By assumption, there
is an ample Cartier divisor $A$ such that $h^0(X,\mathcal{O}_X(D_m+A))\geq 1$ for every $m\gg 0$.
Since $\frac{1}{m}(D_m+A)$ is a sequence of pseudo-effective divisors converging to
$D$, it follows that $D$ is pseudo-effective.
\end{remark}
\begin{remark}\label{non_smooth3}
The key ingredient in the proof of Proposition~\ref{char_pseudoeffective}
was the fact that on every smooth projective variety $X$, there is a
Cartier divisor $T$ such that for every big line bundle $L$ on $X$, we have
$h^0(X,L\otimes \mathcal{O}_X(T))\geq 1$. This, in fact, holds on arbitrary projective varieties.
Indeed, given any projective variety $X$,
consider an alteration $\pi\colon Y\to X$
with $Y$ smooth, and let $T_Y$ be a divisor on $Y$ such that $h^0(Y, L'\otimes\mathcal{O}_Y(T_Y))\geq 1$ for every big line bundle $L'$ on $Y$. Suppose that $L$ is a big line bundle on $X$. Since
$\pi^*(L)$ is big on $Y$, we have $h^0(Y,\pi^*(L)\otimes \mathcal{O}_Y(T_Y))\geq 1$. Arguing as in the proof of
Proposition~\ref{alteration}, we see that if $H$ is an ample Cartier divisor on $X$, then there are
positive integers $d$ and $d'$ such that $h^0(Y, \mathcal{O}_Y(d\pi^*(H)-T_Y))\geq 1$
and we have an embedding $\pi_*(\mathcal{O}_Y)\hookrightarrow \mathcal{O}_X(d'H)^{\oplus N}$ for some
$N\geq 1$. We conclude that if $T=(d+d')H$, then for every big line bundle $L$ on
$X$, we have $h^0(X,L\otimes \mathcal{O}_X(T))\geq 1$.
Note that if the Cartier divisor $T$ on $X$ is as above, and if $A$ is any ample Cartier divisor on $X$,
then $G=T+A$ satisfies the conclusion of Proposition~\ref{char_pseudoeffective}, at least for
Cartier divisors. Indeed, if $D$ is a pseudo-effective Cartier divisor,
then $mD+A$ is big for every $m\geq 1$, hence $h^0(X,\mathcal{O}_X(mD+G))\geq 1$.
\end{remark}
\begin{remark}\label{Totaro}
As B.~Totaro pointed out, one can alternatively deduce the assertions in
Proposition~\ref{char_pseudoeffective} and Remark~\ref{non_smooth3} from the results in
\cite{Arapura} and
\cite{Totaro}. Suppose, for simplicity, that $X$ is a smooth $n$-dimensional projective variety defined over a field of positive characteristic $p$. It follows from a result of Arapura \cite[Thm. 5.4]{Arapura} that if $H$ is a large
enough multiple of an ample Cartier divisor, then $H$ has the following property: if
$M$ is a line bundle such that $H^n(X, M\otimes\mathcal{O}_X(-(n+1)H))=0$, then $H^n(X, M^{p^e})=0$ for all $e$ large enough. If $L$ is a big line bundle, then
$H^0(X, \omega_X\otimes L^{p^e})\neq 0$ for $e\gg 0$. It follows from the above property of $H$ and Serre duality that $H^0(X, \omega_X\otimes\mathcal{O}_X((n+1)H)\otimes L)\neq 0$.
As we have seen, this is enough to give the statement of
Proposition~\ref{char_pseudoeffective}. One can then deduce the corresponding statement in the singular case as in Remark~\ref{non_smooth3}. Alternatively, one can obtain this directly
using a similar argument to the one above and Totaro's extension \cite[Thm.~5.1]{Totaro}
of Arapura's result to singular varieties.
\end{remark}
\section{A general vanishing statement}
In this section we give an elementary vanishing result, valid in arbitrary characteristic.
This only relies on asymptotic Serre vanishing.
Suppose that $X$ is a normal projective variety over
an algebraically closed field $k$.
Given a nonzero ideal $\mathfrak{a}$ on $X$, we define for every
$\lambda\in{\mathbf Q}_{\geq 0}$ another ideal $\mathfrak{a}_{\lambda}$, as follows. Let $\pi\colon\widetilde{X}\to X$
be the normalization of the blow-up of $X$ along $\mathfrak{a}$, and let us write
$\mathfrak{a}\cdot\mathcal{O}_{\widetilde{X}}=\mathcal{O}_{\widetilde{X}}(-F)$. With this notation, we put
$$\mathfrak{a}_{\lambda}:=\pi_*\mathcal{O}_{\widetilde{X}}(-\lceil\lambda F\rceil).$$
Note that $\mathfrak{a}_{\lambda}\subseteq\pi_*\mathcal{O}_{\widetilde{X}}=\mathcal{O}_X$, hence
$\mathfrak{a}_{\lambda}$ is indeed an ideal of $\mathcal{O}_X$.
\begin{proposition}\label{vanishing1}
Let $\mathfrak{a}$ be a nonzero ideal on $X$ and $B$ a Cartier divisor on $X$ such that
$\mathfrak{a}\otimes\mathcal{O}_X(B)$ is globally generated. If $E$ is another Cartier divisor on $X$ such that
$E-\lambda B$ is ample for some $\lambda\in{\mathbf Q}_{\geq 0}$, then there is some $\epsilon>0$ such that for every $\lambda'\in{\mathbf Q}$ with
$\lambda<\lambda'\leq\lambda+\epsilon$ and every locally free sheaf $\mathcal{E}$
of finite rank on $X$, we have
$$H^i(X,\mathcal{E}\otimes\mathcal{O}_X(\ell E)\otimes\mathfrak{a}_{\ell\lambda'})=0$$
for every $i\geq 1$ and every $\ell\gg 0$ ${\rm (}$depending on $\lambda'$ and $\mathcal{E}$${\rm )}$.
\end{proposition}
\begin{proof}
Let $\pi\colon\widetilde{X}\to X$ and $F$ be as above, so that $\mathcal{O}_{\widetilde{X}}(-F)$
is $\pi$-ample. Since $E-\lambda B$ is ample on $X$, it follows that there is
$\epsilon>0$ such that $\pi^*(E-\lambda B)-\eta F$ is ample on $\widetilde{X}$
for $0<\eta\leq\epsilon$.
Note also that by the assumption on $\mathfrak{a}$ and $B$, we can write $\pi^*(B)=F+M$, where
$\mathcal{O}_{\widetilde{X}}(M)$ is base-point free.
Suppose now that $\lambda'\in{\mathbf Q}$ satisfies $\lambda<\lambda'\leq\lambda+\epsilon$,
and let $d$ be a positive integer such that $d\lambda$ and $d\lambda'$ are integers. For every $\ell$ we can
write $\ell=qd+r$, for integers $q$ and $r$, with $0\leq r<d$. Therefore $\lceil \ell\lambda' F\rceil
=qd\lambda'F+\lceil r\lambda' F\rceil$. Note that when $\ell$ goes to infinity, then also
$q$ goes to infinity, while there are only finitely many divisors of the form $\lceil r\lambda' F\rceil$.
Since $\mathcal{O}_{\widetilde{X}}(-F)$ is $\pi$-ample, it follows from the above discussion and relative
asymptotic Serre vanishing that for $\ell\gg 0$, we have
$$R^j\pi_*\mathcal{O}_{\widetilde{X}}(-\lceil\ell\lambda' F\rceil)=0\,\,\text{for all}\,\,j\geq 1.$$
Whenever this holds, the projection formula and the Leray spectral sequence imply
\begin{equation}\label{eq1_prop1}
H^i(X,\mathcal{E}\otimes \mathcal{O}_X(\ell E)\otimes\mathfrak{a}_{\ell\lambda'})\simeq
H^i(\widetilde{X},\pi^*(\mathcal{E})\otimes\pi^*\mathcal{O}_X(\ell E)\otimes\mathcal{O}_{\widetilde{X}}(-\lceil
\ell\lambda' F\rceil)).
\end{equation}
On the other hand, if $\eta=\lambda'-\lambda$, then we can write
$$\pi^*(\ell E)-\lceil \ell\lambda' F\rceil =\ell\left(\pi^*(E-\lambda B)-\eta F+\lambda M\right) +
\ell\lambda'F-\lceil \ell\lambda' F\rceil.$$
Since $\pi^*(E-\lambda B)-\eta F$ is ample and $M$ is nef, it follows that
$\pi^*(E-\lambda B)-\eta F+\lambda M$ is ample.
If we write as above $\ell=qd+r$, then
$$\ell\left(\pi^*(E-\lambda B)-\eta F+\lambda M\right) +
\ell\lambda'F-\lceil \ell\lambda' F\rceil$$
$$=qd\left(\pi^*(E-\lambda B)-\eta F+\lambda M\right)
+r\left(\pi^*(E-\lambda B)-\eta F+\lambda M\right)+r\lambda'F-\lceil r\lambda'F\rceil.$$
Since when we vary $\ell$ the divisor
$r\left(\pi^*(E-\lambda B)-\eta F+\lambda M\right)+r\lambda'F-\lceil r\lambda'F\rceil$
can only take finitely many values, and when $\ell$ goes to infinity, $q$ also goes to infinity,
it follows from asymptotic Serre vanishing that
$$H^i(\widetilde{X},\pi^*(\mathcal{E})\otimes\pi^*\mathcal{O}_X(\ell E)\otimes\mathcal{O}_{\widetilde{X}}(-\lceil
\ell\lambda' F\rceil))=0$$
for all $i\geq 1$ and all $\ell\gg 0$. The assertion in the proposition now follows from this and
the isomorphism (\ref{eq1_prop1}).
\end{proof}
\begin{remark}
For our purpose the precise definition of $\mathfrak{a}_{\lambda}$ will not be important.
For example, we might have taken instead $\mathfrak{a}_{\lambda}:=
\pi_*\mathcal{O}_{\widetilde{X}}(-\lfloor \lambda
F\rfloor)$, and in characteristic zero we might have taken $\mathfrak{a}_{\lambda}=\mathcal{J}(\mathfrak{a}^{\lambda})$,
the multiplier ideal of $\mathfrak{a}$ of exponent $\lambda$.
The proof of Proposition~\ref{vanishing1} works also with these definitions,
and the first variant would work as well for the applications in the next section.
The definition that we have considered gives the
\emph{integrally closed rational powers} of $\mathfrak{a}$, see \cite[\S 10.5]{HS}.
\end{remark}
\begin{corollary}\label{cor_vanishing}
The assertion in Proposition~\ref{vanishing1} also holds if instead of assuming $\mathcal{E}$
locally free we only assume that ${\mathcal Tor}_i^{\mathcal{O}_X}(\mathcal{E},\mathfrak{a}_{\ell\lambda'})=0$
for all $i\geq 1$, all $\lambda'\in{\mathbf Q}_{>0}$, and all $\ell\gg 0$
${\rm (}$depending on $\lambda'$ and $\mathcal{E}$${\rm )}$.
\end{corollary}
\begin{proof}
Since $X$ is projective, we can find a (possibly infinite) resolution
$$\cdots\to\mathcal{E}_i\to\ldots\to \mathcal{E}_1\to\mathcal{E}_0\to\mathcal{E}\to 0,$$
with all $\mathcal{E}_i$ locally free $\mathcal{O}_X$-modules of finite rank.
Our assumption on $\mathcal{E}$ implies that after tensoring this complex by
$\mathcal{O}_X(\ell E)\otimes \mathfrak{a}_{\ell\lambda'}$, the resulting complex is still exact.
By chasing the resulting short exact sequences, we see that if
$\lambda'>\lambda$ is close enough to $\lambda$, and
$\ell$ is large enough (depending on $\lambda'$) so that the conclusion of Proposition~\ref{vanishing1} is satisfied for all $\mathcal{E}_i$ with $0\leq i\leq n-1$, where
$n=\dim(X)$, then we obtain the conclusion of the corollary.
\end{proof}
\section{Proofs of the main results}
Our first goal is to prove Theorem~\ref{p_lifting} from the Introduction.
We henceforth assume that the ground field $k$ is algebraically closed, of
characteristic $p>0$.
A key ingredient
is the log trace map with respect to the Frobenius morphism,
that we now review.
Suppose that $X$ is a smooth variety over $k$. Let $F=F_X\colon X\to X$ denote the absolute Frobenius morphism on $X$, which is the identity on the topological space and which takes
a regular function $u$ to $u^p$.
We have a canonical surjective map
$$t_{X}\colon F_*\omega_X\to \omega_X,$$
which can be
either defined as the trace map for duality with respect to $F$, or as coming from the Cartier isomorphism. Given algebraic coordinates $x_1,\ldots,x_n$ on an open subset $U$ of $X$, this map is characterized by
$$t_X(x_1^{i_1}\cdots x_n^{i_n}dx_1\wedge\cdots\wedge dx_n)=
x_1^{\frac{i_1-p+1}{p}}\cdots x_n^{\frac{i_n-p+1}{p}}dx_1\wedge\cdots\wedge dx_n,$$
where the monomial on the right-hand side is understood to be zero if one of the exponents is not an integer.
Iterating this map $e$ times we obtain a surjective map $t_X^e\colon F^e_*(\omega_X)\to\omega_X$.
Suppose now that $E=E_1+\ldots+E_N$ is a simple normal crossing divisor on $X$.
By tensoring $t_X$ with $\mathcal{O}_X(E)$, and composing with the inclusion
$$F_*(\omega_X(E))\hookrightarrow F_*(\omega_X(pE))\simeq
(F_*(\omega_X))\otimes\mathcal{O}_X(E),$$ we obtain a \emph{log trace map}
$$t_{X,E}\colon F_*(\omega_X(E))\to\omega_X(E).$$
Using the above explicit description in terms of a system of coordinates on $U$ with the property
that each prime divisor in $E$ which intersects $U$ is defined by some $x_i=0$, it is easy to see
that $t_{X,E}$ is again surjective. After iterating $e$ times $t_{X,E}$, we obtain
$$t_{X,E}^e\colon F^e_*(\omega_X(E))\to\omega_X(E).$$
Note that this construction is compatible with adjunction. More precisely, given
$(X,E)$ as above, let $Y=E_1\cap\ldots\cap E_r$. This is a smooth variety of
codimension $r$ in $X$ (possibly
not connected), and $E$ induces a simple normal crossing divisor
$E_Y=\sum_{i=r+1}^NE_i\vert_Y$. Adjunction gives an isomorphism
$\omega_X(E_1+\ldots+E_r)\vert_Y\simeq\omega_Y$, and it follows from the above description
of the log trace maps in local coordinates that the diagram
\begin{equation}\label{comm_diag1}
\begin{CD}
(F_X)_*(\omega_X(E))@>>>
(F_Y)_*(\omega_Y(E_Y))\\
@V{t_{X,E}}VV @VV{t_{Y,E_Y}}V\\
\omega_X(E)@>>> \omega_Y(E_Y),
\end{CD}
\end{equation}
is commutative, where the horizontal maps are induced by restriction to $Y$
and the adjunction isomorphism.
\noindent{\bf Theorem \ref{p_lifting}. }{\it Let $X$ be a smooth projective variety over an
algebraically closed field $k$ of characteristic $p>0$, and let $D$ be an ${\mathbf R}$-divisor on $X$. Suppose that $H=H_1+\ldots+H_r$ is a simple normal crossing divisor on
$X$, with $r<\dim(X)$,
and that
$W:=H_1\cap\dots\cap H_r$ does not intersect the non-nef locus $\mathbf B_-(D)$ of $D$.
In this case there exists an ample divisor $G$ on $X$ such that
if $$A=K_X+H+2G$$
then $A$ is ample and
the restriction map
\begin{equation}\label{e_lifting}
H^0(X,\mathcal{O}_X(\lfloor mD\rfloor +A))\to H^0(W,\mathcal{O}_X(\lfloor mD\rfloor +A)\vert_W)
\end{equation}
is surjective for every $m\ge 1$.
}
\begin{proof}
Let $\mathcal F$ be the kernel of the trace morphism
$$t_W\colon (F_W)_*(\omega_W)\to \omega_W.$$
By Fujita's vanishing theorem (see \cite{Fujita}), if $G$ is a large enough multiple of
a given ample divisor on $X$, then
\begin{equation}
\label{eq.H1Vanishing}
H^1(W,\mathcal F\otimes \mathcal{O}_X(G)\vert_W\otimes L)=0
\end{equation}
for every nef line bundle $L$ on $W$.
Let us write $D=\alpha_1D_1+\ldots+\alpha_sD_s$, with the $D_i$ distinct prime divisors on $X$ and $\alpha_i\in {\mathbf R}$. After possibly replacing $G$ by a multiple, we may assume that
$G-(t_1D_1+\ldots+t_sD_s)$ is ample for all $t_1,\ldots,t_s\in [0,1]$.
In this case $G-\{mD\}$ is ample for every $m\geq 1$, where
$\{mD\}=mD-\lfloor mD\rfloor$. Since
$\lfloor mD\rfloor+G=mD+(G-\{mD\})$, it follows from our assumption that
the stable base locus ${\rm SB}(\lfloor mD\rfloor+G)$ does not intersect $W$.
Let $r_m\geq 1$ be such that the base locus of $|r_m(\lfloor mD\rfloor+G)|$ does not intersect $W$, and let us denote by $\mathfrak{a}^{(m)}$ the ideal defining the base-locus of this linear system,
with its natural scheme structure.
Let $A=K_X+H+2G$, where $K_X$ is such that $\omega_X=\mathcal{O}_X(K_X)$. After possibly replacing $G$ by a multiple, we may assume that $A$ is ample. Let us fix $m\geq 1$. In order to prove the surjectivity of (\ref{e_lifting}),
we first apply Corollary~\ref{cor_vanishing}
for the ideal $\mathfrak{a}^{(m)}$, $\lambda_m=\frac{1}{r_m}$, the divisors
$B_m=r_m(\lfloor mD\rfloor+G)$ and
$E_m=\lfloor mD\rfloor +2G$, and the sheaf $\mathcal{E}_m=\mathcal{I}_W\otimes\omega_X(H)$,
where $\mathcal{I}_W$ is the ideal defining the subvariety $W$.
Note that since $W$ is disjoint from the zero-locus of $\mathfrak{a}^{(m)}$,
we have ${\mathcal Tor}_i^{\mathcal{O}_X}(\mathcal{E}_m, \mathfrak{a}^{(m)}_{\mu})=0$
for every $\mu\in{\mathbf Q}_{>0}$ and every $i\geq 1$. The vanishing given by Corollary~\ref{cor_vanishing}
implies that we can find $\lambda'_m>\lambda_m$ such that
for $e\gg 0$, the restriction map
\begin{equation}\label{eq1_theorem}
H^0(X, \omega_X(H)\otimes\mathcal{O}_X(p^eE_m)\otimes\mathfrak{a}^{(m)}_{p^e\lambda'_m})
\to H^0(W, \omega_W\otimes\mathcal{O}_X(p^eE_m)\vert_W)
\end{equation}
is surjective (note that $\mathfrak{a}^{(m)}_{p^e\lambda'_m}\cdot\mathcal{O}_W=\mathcal{O}_W$,
since $W$ does not intersect the zero-locus of
$\mathfrak{a}^{(m)}$). In particular, the restriction map
\begin{equation}\label{eq2_theorem}
H^0(X, \omega_X(H)\otimes\mathcal{O}_X(p^eE_m))
\to H^0(W, \omega_W\otimes\mathcal{O}_X(p^eE_m)\vert_W)
\end{equation}
is surjective for $e\gg 0$.
It follows from (\ref{comm_diag1}) after iteration that we also have a commutative diagram
\begin{equation}\label{diag1}
\begin{CD}
(F_X^e)_*(\omega_X(H)) @>>> (F_W^e)_*(\omega_W) \\
@V{t_{X,H}^e}VV@VV{t_W^e}V \\
\omega_X(H)@>>> \omega_W
\end{CD}
\end{equation}
in which the horizontal maps are induced by restriction via adjunction,
and the vertical maps are the corresponding iterated trace maps.
Tensoring with $\mathcal{O}_X(E_m)$ and using the projection formula, we obtain the commutative diagram
\begin{equation}\label{diag2}
\begin{CD}
(F_X^e)_*(\omega_X(H)\otimes\mathcal{O}_X(p^eE_m)) @>>> (F_W^e)_*(\omega_W
\otimes\mathcal{O}_X(p^eE_m)\vert_W) \\
@VVV@VVV \\
\omega_X(H)\otimes\mathcal{O}_X(E_m)@>>> \omega_W\otimes\mathcal{O}_X(E_m)\vert_W.
\end{CD}
\end{equation}
By taking global sections, we obtain the commutative diagram
\begin{equation}
\begin{CD}\label{diag3}
H^0\big(X, (F_X^e)_*(\omega_X(H)\otimes\mathcal{O}_X(p^eE_m))\big) @>>> H^0\big(W,(F_W^e)_*( \omega_W
\otimes\mathcal{O}_X(p^eE_m)\vert_W)\big) \\
@VVV@VVV \\
H^0(X, \omega_X(H)\otimes\mathcal{O}_X(E_m))@>>> H^0(W, \omega_W\otimes\mathcal{O}_X(E_m)\vert_W),
\end{CD}
\end{equation}
in which the top horizontal map is surjective for $e\gg 0$ by (\ref{eq2_theorem}).
We claim that the right vertical map is surjective for every $e\geq 1$. In order to prove this, it is enough to show that
$$H^0\big(W, (F^{i+1}_W)_*(\omega_W\otimes\mathcal{O}_X(p^{i+1}E_m)\vert_W)\big)
\to H^0\big(W, (F^i_W)_*(\omega_W\otimes\mathcal{O}_X(p^iE_m)\vert_W)\big)$$
is surjective for every $i\geq 0$. It follows from the exact sequence
$$0\to (F^i_W)_*(\mathcal{F})\to (F^{i+1}_W)_*(\omega_W)\to (F^i_W)_*(\omega_W)\to 0$$
that it is enough to show that $H^1(W, (F^i_W)_*(\mathcal{F}\otimes\mathcal{O}_X(p^iE_m)\vert_W))=0$,
or equivalently,
\begin{equation}\label{eq_vanishing}
H^1(W, \mathcal{F}\otimes\mathcal{O}_X(p^iE_m)\vert_W)=0.
\end{equation}
Recall that $\mathcal{O}_X(\lfloor mD\rfloor +G)\vert_W$ is nef (in fact, semiample).
Therefore
$$\mathcal{F}\otimes \mathcal{O}_X(p^iE_m)\vert_W\simeq\mathcal{F}\otimes \mathcal{O}_X(G)\vert_W
\otimes\mathcal{O}_X(p^i(\lfloor mD\rfloor+G)+
(p^i-1)G)\vert_W,$$
hence the vanishing in (\ref{eq_vanishing}) follows from \eqref{eq.H1Vanishing}.
Since both the right vertical and top horizontal maps in
(\ref{diag3}) are surjective for $e\gg 0$, it follows that also the bottom horizontal map in that diagram is surjective, which is precisely what we needed to prove.
\end{proof}
\begin{remark}
It follows from the above proof that the surjectivity in the statement of Theorem~\ref{p_lifting}
also holds if we replace $A$ by any divisor $A'$ such that $A'-A$ is ample.
\end{remark}
\begin{remark}\label{curve_case}
If the subvariety $W$ in Theorem~\ref{p_lifting} is a curve, then we can be more explicit
about the choice of $G$ such that we have the vanishing in (\ref{eq.H1Vanishing}). Indeed,
by a theorem of Tango (cf. \cite{Tan72}) the vanishing in (\ref{eq.H1Vanishing})
holds if ${\rm deg}(G\vert_W)>\frac{2g-2}{p}$, where $g$ is the genus of $W$ and
${\rm char}(k)=p$.
\end{remark}
\begin{remark}
Instead of restricting to $W$ in one step via adjunction in diagram \eqref{diag1}, it is possible to cut down by each $H_i$ individually, and keep track of the sections that extend via vector subspaces similar to the $S^0$ defined in \cite{Schwede}. In fact, it follows from \eqref{diag3} that a subspace of $S^0(X,\mathcal{O}_X(\lfloor mD\rfloor +A))$ surjects onto $H^0(W,\mathcal{O}_X(\lfloor mD\rfloor +A)\vert_W)$ for all $m \geq 1$.
\end{remark}
\begin{remark}
Finally, we remark that it is possible to weaken the hypothesis that $X$ is smooth and $H$ is simple normal crossing to simply that: there exists an open neighborhood $U$ containing $W := H_1 \cap \ldots \cap H_r$ such that $U$ is smooth and $H|_U$ is simple normal crossing. The proof is unchanged. In fact, it is possible even to obtain similar statements if $W$ is an $F$-pure center of $(X, H)$.
\end{remark}
We use the method in the proof of Theorem~\ref{p_lifting} to also prove the lower bound on numerical dimension stated in the Introduction.
\noindent{\bf{Theorem \ref{thm_main}.}} {\it Suppose that $X$ is a smooth projective variety over an algebraically closed field $k$ of positive characteristic. If $D$ is a pseudo-effective
${\mathbf R}$-divisor
on $X$ which is not numerically equivalent to the negative part $N_{\sigma}(D)$ in its divisorial Zariski
decomposition, then $\kappa_{\sigma}(D)\geq 1$, that is, there is an ample divisor
$A$ on $X$ and $C>0$ such that
$$h^0(X,\mathcal{O}_X(\lfloor mD\rfloor+A))\geq Cm\,\,\text{for all}\,\,m\gg 0.$$
}
\begin{proof}
Let $D=N_{\sigma}(D)+P_{\sigma}(D)$ be the divisorial Zariski decomposition of $D$.
We simply write $N_{\sigma}$ and $P_{\sigma}$ for $N_{\sigma}(D)$ and $P_{\sigma}(D)$,
respectively.
We fix a very ample divisor $H$ on $X$.
By assumption, $P_{\sigma}$ is not numerically trivial, and it is pseudo-effective by
Proposition~\ref{prop_Nakayama}. It follows from Lemma~\ref{lem_num_trivial1}
that $(P_{\sigma}\cdot H^{n-1})>0$, where $n=\dim(X)$.
For each $m\geq 1$, we will consider a curve $W_m$ in $X$,
given as the intersection of general
$(n-1)$ elements in the linear system $|H|$. Note that each such $W_m$ is smooth and connected, of genus
$g=\frac 1 2 ((K_X+(n-1)H)\cdot H^{n-1})+1$. We fix an ample divisor $G$ on $X$ such that
$(G\cdot H^{n-1})>\frac{2g-2}{p}$, where $p={\rm char}(k)$. It follows from
Remark~\ref{curve_case} that for every $W_m$ as above, the vanishing in
(\ref{eq.H1Vanishing}) holds.
Furthermore, arguing as in the proof of Theorem~\ref{p_lifting}, we see that after possibly
replacing $G$ by a multiple, we may assume that $G-\{mP_{\sigma}\}$ is ample for every
$m\geq 1$. Since $\lfloor mP_{\sigma}\rfloor+G=mP_{\sigma}+(G-\{mP_{\sigma}\})$,
it follows from Proposition~\ref{prop_Nakayama} that
${\rm SB}(\lfloor mP_{\sigma}\rfloor+G)$ contains no codimension one subvarieties.
We can therefore choose $W_m$ as above for each $m\geq 1$ such that
$W_m\cap {\rm SB}(\lfloor mP_{\sigma}\rfloor+G)=\emptyset$.
It follows from the proof of Theorem~\ref{p_lifting} that if we take $A=K_X+H+2G$
(which may be assumed ample after replacing $G$ by a multiple), then
$$h^0(X,\mathcal{O}_X(\lfloor mP_{\sigma}\rfloor+A))\geq h^0(W_m,\mathcal{O}_X(\lfloor mP_{\sigma}\rfloor+A)\vert_{W_m})$$
for all $m\geq 1$.
Since $W_m$ is a smooth curve of genus $g$ and $(P_{\sigma}\cdot H^{n-1})>0$, we deduce from the Riemann-Roch theorem that
$$h^0(X,\mathcal{O}_X(\lfloor mP_{\sigma}\rfloor+A))\geq ((\lfloor mP_{\sigma}\rfloor+A)\cdot H^{n-1})-g
\geq Cm$$
for a suitable $C>0$ and all $m\gg 0$.
Since $N_{\sigma}$ is effective, the difference $\lfloor mD\rfloor-\lfloor mP_{\sigma}\rfloor$
is effective, hence
$$h^0(X,\mathcal{O}_X(\lfloor mD\rfloor+A))\geq h^0(X,\mathcal{O}_X(\lfloor mP_{\sigma}\rfloor+A))\geq Cm$$
for $m\gg 0$. As we have seen in the discussion of the definition of numerical dimension in
\S 2, this implies $\kappa_{\sigma}(D)\geq 1$.
\end{proof}
We conclude with an application of Theorem~\ref{p_lifting}, showing that also in positive
characteristic, the definition of numerical dimension that we have been using agrees in the case
of a nef ${\mathbf R}$-divisor with the definition in \cite{Kawamata}. Suppose that $X$ is a smooth $n$-dimensional projective variety over an algebraically closed field of positive characteristic. If $D$ is a nef
${\mathbf R}$-divisor on $X$, let us temporarily denote by $\nu(D)$ the largest $j\geq 0$ such that the
cycle class $D^j$ is not numerically trivial. It follows from Lemma~\ref{lem_num_trivial2}
that if $H$ is an ample divisor on $X$, then $\nu(D)$ is the largest $j$ such that
$(D^j\cdot H^{n-j})\neq 0$.
\begin{proposition}
\label{prop.KawamataNumericalDimensionEquivalent}
If $D$ is a nef ${\mathbf R}$-divisor on the smooth projective variety $X$ as above, then
$\kappa_{\sigma}(D)=\nu(D)$.
\end{proposition}
\begin{proof}
We first show that $\kappa_{\sigma}(D)\geq\nu(D)$. Let $n=\dim(X)$. If $\nu(D)=n$, then
$D$ is big, and in this case it is clear that $\kappa_{\sigma}(D)\geq n$. Suppose now that
$\nu(D)=j<n$, and let $H$ be a very ample divisor on $X$. If $H_1,\ldots,H_{n-j}$
are general elements in the linear system $|H|$, then $H_1+\ldots+H_{n-j}$ has simple normal crossings.
It follows from Theorem~\ref{p_lifting} that if $W=H_1\cap\ldots\cap H_{n-j}$, then there is an ample divisor $A$ on $X$ such that
$$H^0(X,\mathcal{O}_X(\lfloor mD\rfloor+A))\to H^0(W,\mathcal{O}_X(\lfloor mD\rfloor+A)\vert_W)$$
is surjective for every $m\geq 1$. On the other hand, since $(D^j\cdot H^{n-j})>0$, it follows that
$\mathcal{O}_X(D)\vert_W$ is big.
Furthermore, after possibly replacing $H$ by a multiple, we may assume that
the following holds: if we write $D=
a_1D_1+\ldots+a_sD_s$, with the $D_i$ distinct prime divisors and $a_i\in {\mathbf R}$, then also the
$D_i\vert_W$ are distinct
prime divisors. In particular, $D\vert_W$ is well-defined as a divisor and
$\lfloor mD\rfloor\vert_W=\lfloor mD\vert_W\rfloor$.
By putting all these together, we conclude that
there is $C>0$ such that
$$h^0(X,\mathcal{O}_X(\lfloor mD\rfloor+A))\geq h^0(W,\mathcal{O}_X(\lfloor mD\rfloor+A)\vert_W)
\geq Cm^j$$
for all $m\gg 0$, hence $\kappa_{\sigma}(D)\geq j$.
We prove the reverse inequality by induction on $n$. If $D$ is big, then $\nu(D)=n$,
hence we are done. Otherwise, let $r=\kappa_{\sigma}(D)$, and let $A$ be an ample
divisor on $X$ such that
$$\liminf_{m\to\infty} \frac {h^0(X,\mathcal{O}_X(\lfloor mD\rfloor+A))}{m^r}>0.$$
If $n=1$, then $r=0$ and there is nothing to prove. Therefore we assume $n\geq 2$.
Let $\ell$ be a positive integer such that $(\ell+1) A$ is very ample, and choose $E$ a general element in the linear system $|(\ell +1)A|$. We have an exact sequence
$$0\to H^0(X,\mathcal{O}_X(\lfloor mD\rfloor-\ell A))\to H^0(X,\mathcal{O}_X(\lfloor mD\rfloor+A))
\to H^0(E, \mathcal{O}_X(\lfloor mD\rfloor+A)\vert_E).$$
Since $D$ is not big, it follows that $H^0(X,\mathcal{O}_X(\lfloor mD\rfloor-\ell A))=0$ for every $m\geq 1$, and the exact sequence implies
\begin{equation}\label{eq_final}
\liminf_{m\to\infty} \frac {h^0(E,\mathcal{O}_X(\lfloor mD\rfloor+A)\vert_E)}{m^r}>0.
\end{equation}
Since $E$ is general, we may assume that $E$ is smooth. Furthermore,
arguing as above, we see that we may assume that
$D\vert_E$ is well-defined as a divisor, and
$\lfloor mD\rfloor\vert_E=\lfloor mD\vert_E\rfloor$ for every $m$.
Therefore
(\ref{eq_final}) gives
$\kappa_{\sigma}(D)\leq\kappa_{\sigma}(D\vert_E)$. On the other hand, since $D$ is not big
and $E$ is ample, we have $\nu(D)=\nu(D\vert_E)$, and using the inductive assumption we obtain
$$\kappa_{\sigma}(D)\leq \kappa_{\sigma}(D\vert_E)\leq \nu(D\vert_E)=\nu(D).$$
This completes the proof of the proposition.
\end{proof}
\begin{remark}
In fact, the assertion in Proposition~\ref{prop.KawamataNumericalDimensionEquivalent}
also holds on singular projective varieties. More precisely, suppose that $X$ is a
normal projective variety
over an algebraically closed field of positive characteristic, and let $n=\dim(X)$. If $D$ is a nef
${\mathbf R}$-Cartier ${\mathbf R}$-divisor on $X$, then $\kappa_{\sigma}(D)=\nu(D)$, where if $H$
is an ample Cartier divisor on $X$, we denote by $\nu(D)$ the largest $j$ such that
$(D^j\cdot H^{n-j})\neq 0$. Indeed, let $\pi\colon Y\to X$ be an alteration, with $Y$ smooth.
We have $\kappa_{\sigma}(D)=\kappa_{\sigma}(\pi^*(D))$
by Proposition~\ref{alteration} and $\kappa_{\sigma}(\pi^*(D))=\nu(\pi^*(D))$ by
Proposition~\ref{prop.KawamataNumericalDimensionEquivalent}. Therefore it is enough to show that $\nu(D)=\nu(\pi^*(D))$.
Let $r=\nu(D)$.
Note first that $(\pi^*(D)^{r}\cdot \pi^*(H)^{n-r})={\rm deg}(\pi)(D^r\cdot H^{n-r})\neq 0$, hence
$\pi^*(D)^{r}$ is not numerically trivial. We similarly obtain
$(\pi^*(D)^{r+1}\cdot \pi^*(H)^{n-r-1})=0$. Note that $\pi^*(H)$ is nef and big. In particular, we can write $\pi^*(H)=A+E$, for ${\mathbf R}$-divisors $A$ and $E$ on $X$, with $A$ ample and $E$
effective. Since $\pi^*(H)$ and $\pi^*(D)$ are nef, we have
$$(\pi^*(D)^{r+1}\cdot A^{n-r-1})\leq (\pi^*(D)^{r+1}\cdot \pi^*(H)^{n-r-1})=0.$$
It follows from Lemma~\ref{lem_num_trivial2} that $\nu(\pi^*(D))=r$, which completes the proof of our assertion.
\end{remark}
\begin{remark}
J.~Koll\'{a}r pointed out to us that one can give a proof of
Proposition~\ref{prop.KawamataNumericalDimensionEquivalent} using Fujita's vanishing theorem and Matsusaka's results on variable intersection cycles
\cite{Matsusaka}. His argument, in fact, works directly on arbitrary normal varieties.
\end{remark}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
|
{
"timestamp": "2013-06-13T02:00:51",
"yymm": "1206",
"arxiv_id": "1206.6521",
"language": "en",
"url": "https://arxiv.org/abs/1206.6521"
}
|
\section{Introduction and main results}
In this paper, we consider the following semilinear elliptic equation, which has been extensively studied:
$$
\left\{
\begin{array}{ll}
-\Delta u=\lambda f(u)\ \ \ \ \ \ \ & \mbox{ in } \Omega \, ,\\
u\geq 0 & \mbox{ in } \Omega \, ,\\
u=0 & \mbox{ on } \partial\Omega \, ,\\
\end{array}
\right. \eqno{(P_\lambda)}
$$
\
\noindent where $\Omega\subset\real^N$ is a smooth bounded domain,
$N\geq 1$, $\lambda\geq 0$ is a real parameter and the
nonlinearity $f:[0,\infty)\rightarrow \real$ satisfies
\begin{equation}\label{convexa}
f \mbox{ is } C^1, \mbox{ nondecreasing and convex, }f(0)>0,\mbox{
and }\lim_{u\to +\infty}\frac{f(u)}{u}=+\infty.
\end{equation}
It is well known that there exists a finite positive extremal
parameter $\lambda^\ast$ such that ($P_\lambda$) has a minimal
classical solution $u_\lambda\in C^2(\overline{\Omega})$ if $0\leq
\lambda <\lambda^\ast$, while no solution exists, even in the weak
sense, for $\lambda>\lambda^\ast$. The set $\{u_\lambda:\, 0\leq
\lambda < \lambda^\ast\}$ forms a branch of classical solutions
increasing in $\lambda$. Its increasing pointwise limit
$u^\ast(x):=\lim_{\lambda\uparrow\lambda^\ast}u_\lambda(x)$ is a
weak solution of ($P_\lambda$) for $\lambda=\lambda^\ast$, which
is called the extremal solution of ($P_\lambda$) (see
\cite{Bre,BV,Dup}). In fact, if $f$ satisfies all the hypotheses of
(\ref{convexa}) except the convexity, then all the results we have mentioned
remain true, except the continuity of the family of minimal solutions
$\{ u_\lambda \}$ as a function of $\lambda$ (see \cite[Proposition 5.1]{cc}).
The regularity and properties of the extremal solutions depend
strongly on the dimension $N$, domain $\Omega$ and nonlinearity
$f$. When $f(u)=e^u$, it is known that $u^\ast\in L^\infty
(\Omega)$ if $N<10$ (for every $\Omega$) (see \cite{CrR,MP}),
while $u^\ast (x)=-2\log \vert x\vert$ and $\lambda^\ast=2(N-2)$
if $N\geq 10$ and $\Omega=B_1$ (see \cite{JL}). There is an
analogous result for $f(u)=(1+u)^p$ with $p>1$ (see \cite{BV}).
Brezis and V\'azquez \cite{BV} raised the question of determining
the boundedness of $u^\ast$, depending on the dimension $N$, for
general nonlinearities $f$ satisfying (\ref{convexa}). The first general results were due to Nedev \cite{Ne}, who proved that $u^\ast \in
L^\infty (\Omega)$ if $N\leq 3$, and $u^\ast \in
L^p (\Omega)$ for every $p<N/(N-4)$, if $N\geq 4$. The best known result was established by Cabr\'e \cite{cabre4}, who proved that $u^\ast \in
L^\infty (\Omega)$ if $N\leq 4$ and $\Omega$ is convex (no convexity on $f$ is imposed). If $N\geq 5$ and $\Omega$ is convex Cabr\'e and Sanch\'on \cite{casa} have obtained that $u^\ast \in
L^\frac{2N}{N-4} (\Omega)$ (again, no convexity on $f$ is imposed). On the other hand, Cabr\'e and Capella \cite{cc} have proved
that $u^\ast \in L^\infty (\Omega)$ if $N\leq 9$ and $\Omega=B_1$. Recently, Cabr\'e and Ros-Oton \cite{cros} have obtained that $u^\ast \in L^\infty (\Omega)$ if $N\leq 7$ and $\Omega$ is a convex domain of double revolution (see \cite{cros} for the definition).
Another interesting question is whether the
extremal solution lies in the energy class. Nedev \cite{Ne,Ne2}
proved that $u^\ast \in H_0^1(\Omega)$ if $N\leq 5$ (for every
$\Omega$) or $\Omega$ is convex (for every $N\geq 1$). Brezis and V\'azquez \cite{BV} proved that a sufficient condition
to have $u^\ast \in H_0^1(\Omega)$ is that $\liminf_{u\to \infty}
u\, f'(u)/f(u)>1$ (for every $\Omega$ and $N\geq 1$).
In this paper we establish the boundedness of the extremal solution for general bounded smooth domains in dimension $4$, not necessarily convex. Contrary to the result of Cabr\'e, we need to impose the convexity of $f$. In higher dimensions, we improve the results of Nedev \cite{Ne,Ne2}, obtaining that $u^\ast \in L^\frac{N}{N-4}(\Omega)$ if $N\geq 5$, and $u^\ast \in H_0^1(\Omega)$ if $N=6$.
\begin{theorem}\label{N=4}
Let $f$ be a function satisfying (\ref{convexa}) and $\Omega\subset \mathbb{R}^4$ be a smooth bounded domain. Let $u^\ast$ be the extremal
solution of ($P_\lambda$). Then $u^\ast\in L^\infty (\Omega)$.
\end{theorem}
\begin{theorem}\label{N>4}
Let $f$ be a function satisfying (\ref{convexa}) and $\Omega\subset \mathbb{R}^N$ be a smooth bounded domain. Let $u^\ast$ be the extremal
solution of ($P_\lambda$). Then, for $N\geq 5$, $u^\ast\in W^{2,\frac{N}{N-2}}(\Omega)$ and $f(u^\ast)\in L^\frac{N}{N-2}(\Omega)$. In particular,
\begin{enumerate}
\item[i)]If $N\geq 5$, then $\displaystyle{u^\ast\in L^\frac{N}{N-4}(\Omega)}$.
\item[ii)] If $N=6$, then $u^\ast \in H_0^1(\Omega)$.
\end{enumerate}
\end{theorem}
The proofs of Theorems \ref{N=4} and \ref{N>4} use the semistability of the minimal solutions $u_\lambda$ ($0<\lambda<\lambda^\ast$).
Recall that a classical solution $u$ of
\begin{equation}\label{general}
\left\{
\begin{array}{ll}
-\Delta u=g(u)\ \ \ \ \ \ \ & \mbox{ in } \Omega \, ,\\
u=0 & \mbox{ on } \partial\Omega \, ,\\
\end{array}
\right.
\end{equation}
\noindent where $N\geq 1$, $g\in C^1(\mathbb{R})$ and $\Omega\subset \mathbb{R}^N$ is a smooth bounded domain, is semistable if
$$\int_\Omega \left( \vert \nabla \xi\vert^2-g'(u)\xi^2\right) \,
dx\geq 0\, ,$$ \noindent for every $\xi\in C^\infty (\Omega)$ with compact
support in $\Omega$.
Note that this expression is the second variation of energy at $u$. The semistability of a solution $u$ is equivalent to the nonnegativity of $\lambda_1\left(-\Delta-g'(u);\Omega\right)$, the first Dirichlet eigenvalue of the linearized operator $-\Delta-g'(u)$ at $u$ in $\Omega$.
To prove our main results we will use the following lemma, which follows easily from a result of Nedev \cite{Ne}.
\begin{lemma}\label{est}
Let $N\geq 1$, $f$ be a function satisfying (\ref{convexa}) and $\Omega\subset \mathbb{R}^N$ be a smooth bounded domain. Then there exists a positive constant $M=M(f,\Omega)$, depending on $f$ and $\Omega$, but not on $\lambda \in (0,\lambda^\ast)$, such that
$$\int_{u_\lambda>1} \frac{f(u_\lambda)^2}{u_\lambda}\leq M\, , \ \ \ \forall \lambda\in (0,\lambda^\ast).$$
\end{lemma}
\begin{proof}
Using the semistability of the minimal solutions $u_\lambda$, Nedev (see the proof of Theorem 1 in \cite{Ne}) obtained that
\begin{equation}\label{nedev}\int_\Omega \frac{(f(u_\lambda)-f(0))^2}{u_\lambda}\leq M_1 \, , \ \ \ \forall \lambda\in (0,\lambda^\ast),\end{equation}
\noindent where $M_1$ is a constant independent of $\lambda$. On the other hand, since $\lim_{s\to +\infty}f(s)=+\infty$, then $\lim_{s\to +\infty}\left(2(f(s)-f(0))^2-f(s)^2\right)=+\infty$. Thus $2(f(s)-f(0))^2-f(s)^2\geq -M_2$, for every $s\geq 0$, where $M_2$ is a constant depending only on $f$. Applying this and (\ref{nedev}), we conclude that
$$\int_{u_\lambda>1} \frac{f(u_\lambda)^2}{u_\lambda}\leq\int_{u_\lambda>1} \frac{M_2+2\left(f(u_\lambda)-f(0)\right)^2}{u_\lambda}\leq M_2\vert \Omega \vert+2M_1,$$
\noindent and the lemma follows.
\end{proof}
The paper is organized as follows. Section \ref{N==4} deals with dimension $N=4$ and we prove Theorem \ref{N=4}. In Section \ref{N>>4}, the estimates of Theorem \ref{N>4} are proved. Finally, in Section \ref{sobolevv} we obtain some new $W^{1,q}$ and $W^{2,q}$ estimates of the extremal solution $u^\ast$.
\section{The case $N=4$}\label{N==4}
The following theorem is due to Cabr\'e, and it is the main estimate used in the proof of the results of \cite{cabre4}. We will use it, in order to obtain Lemma \ref{C1}.
\begin{theorem}(\cite{cabre4}).\label{desigualdades}
Let $g$ be any $C^\infty$-function and $\Omega\subset \mathbb{R}^N$ any smooth bounded domain. Assume that $2\leq N\leq 4$.
Let $u\in C^1_0(\overline{\Omega})$, with $u>0$ in $\Omega$, be a classical semistable solution of (\ref{general}). Then, for every $t>0$,
$$
\Vert u\Vert_{L^\infty (\Omega)}\leq t+\frac{K}{t}\vert\Omega\vert^\frac{4-N}{2N}\left(\int_{u<t}\vert\nabla u\vert^4\right)^{1/2},
$$
\noindent where $K$ is a universal constant (in particular, independent of $g$, $\Omega$ and $u$).
\end{theorem}
\begin{lemma}\label{C1}
Let $g$ be any $C^1$-function satisfying $g(0)>0$ and $\Omega\subset \mathbb{R}^N$ any smooth bounded domain. Assume that $2\leq N\leq 4$.
Let $u\in C^1_0(\overline{\Omega})$, with $u>0$ in $\Omega$, be a classical minimal positive solution of (\ref{general}) (i.e., $u$ is the only solution of (\ref{general}) in the set $\left\{ w\in C^1_0(\overline{\Omega}):\, 0\leq w\leq u\right\}$). Then, for every $t>0$,
$$
\Vert u\Vert_{L^\infty (\Omega)}\leq t+\frac{K}{t}\vert\Omega\vert^\frac{4-N}{2N}\left(\int_{u<t}\vert\nabla u\vert^4\right)^{1/2},
$$
\noindent where $K$ is a universal constant (in particular, independent of $g$, $\Omega$ and $u$). In fact, we can take the same constant $K$ of Theorem \ref{desigualdades}.
\end{lemma}
\begin{proof} Let $L=\Vert u \Vert_{L^\infty(\Omega)}$. Take a sequence of polynomials $p_n$ such that $p_n(x)<g(x)$ for every $x\in [0,L]$ and $p_n\to g$ in $L^\infty (0,L)$ as $n\to\infty$. (Take for instance $p_n$ such that $g-2/n\leq p_n\leq g-1/n$ in $[0,L]$). Hence $u$ is a strict supersolution of the problem
$$
\left\{
\begin{array}{ll}
-\Delta w=p_n(w)\ \ \ \ \ \ \ & \mbox{ in } \Omega \, ,\\
w=0 & \mbox{ on } \partial\Omega \, ,\\
\end{array}
\right. \eqno{(P_n)}
$$
\
On the other hand, since $g(0)>0$ and $p_n\to g$ in $L^\infty (0,L)$ as $n\to\infty$, we have that, up to a subsequence, $p_n(0)>0$ for every $n\in \natu$. This is equivalent to the fact that the trivial function $0$ is a strict subsolution of the problem $(P_n)$. Then, the energy functional for this equation is well defined in the closed convex set of $H_0^1(\Omega)$ functions $w$ satisfying $0\leq w\leq u$, and it admits an absolute minimizer $u_n$ in this convex set. It is well known that $u_n$ is a classical semistable solution of $(P_n)$ (see \cite[Rem. 1.11]{cc} for more details). Therefore, by Theorem \ref{desigualdades}
\begin{equation}\label{generaln}
\begin{array}{ll}
\displaystyle{\Vert u_n\Vert_{L^\infty (\Omega)}\leq t+\frac{K}{t}\vert\Omega\vert^\frac{4-N}{2N}\left(\int_{u_n<t}\vert\nabla u_n\vert^4\right)^{1/2}, \ \forall t>0.}
\end{array}
\end{equation}
Since $\Vert u_n \Vert_{L^\infty(\Omega)}\leq L$, then $\Vert p_n(u_n) \Vert_{L^\infty(\Omega)}\leq \Vert p_n \Vert_{L^\infty(0,L)}\leq C'$, for some constant $C'$. Thus, by elliptic regularity (see \cite{ADN}), $\Vert u_n \Vert_{W^{2,p}(\Omega)}$ is bounded for every $1<p<\infty$. Choosing $p>N$, we can suppose, up to a subsequence, that $u_n\rightharpoonup u_0$ in $W^{2,p}(\Omega)$ and $u_n\rightarrow u_0$ in $C^1_0(\overline{\Omega})$ for some function $u_0\in W^{2,p}(\Omega)$. On the other hand
$$\Vert p_n(u_n)-g(u)\Vert_{L^\infty(\Omega)}\leq \Vert p_n(u_n)-g(u_n)\Vert_{L^\infty(\Omega)}+\Vert g(u_n)-g(u)\Vert_{L^\infty(\Omega)}$$
$$\leq \Vert p_n -g\Vert_{L^\infty(0,L)}+\Vert u_n-u\Vert_{L^\infty(\Omega)}\Vert g'\Vert_{L^\infty(0,L)}\rightarrow 0, \mbox{ as } n\rightarrow \infty.$$
Thus $p_n(u_n)\rightarrow g(u)$ in $L^\infty (\Omega)$ and it follows easily that $u_0$ is a classical solution of (\ref{general}). Since $0\leq u_0\leq u$ and $u$ is a classical minimal positive solution of (\ref{general}), we deduce that $u_0=u$. Hence $u_n\rightarrow u$ in $C_0^1(\overline{\Omega})$.
We claim that $\chi_{\{u_n\leq t\} }(x)\to \chi_{\{u\leq t\} }(x)$ for every $x\in \Omega$ and $t>0$. Here $\chi_A$ denotes the characteristic function of the set $A$. Indeed, if $u(x)\leq t$ then $u_n(x)\leq u(x)\leq t$ and consequently $\chi_{\{u_n\leq t\}}(x)=\chi_{\{u\leq t\}}(x)=1$. If $u(x)>t$ then, by the $L^\infty(\Omega)$ convergence of $u_n$ to $u$, we have that $u_n(x)>t$ for large $n$ and it follows that $\chi_{\{u_n\leq t\}}(x)\to 0=\chi_{\{u\leq t\}}(x)$.
Applying this, the convergence of $u_n$ to $u$ in $C_0^1(\overline{\Omega})$, Sard's theorem and Lebesgue's dominated convergence theorem, we conclude, for every $t>0$, that
$$\int_{u_n<t} \vert \nabla u_n\vert^4=\int_{u_n\leq t} \vert \nabla u_n\vert^4=\int_\Omega \vert\nabla u_n\vert^4 \chi_{\{u_n\leq t\}}\rightarrow \int_\Omega \vert\nabla u\vert^4 \chi_{\{u\leq t\}}$$
$$=\int_{u\leq t} \vert \nabla u\vert^4=\int_{u<t} \vert \nabla u\vert^4.$$
Thus, taking limit as $n$ tends to $\infty$ in (\ref{generaln}), the proof is complete.
\end{proof}
\begin{proposition}\label{des}
Let $g$ be a function and $\Omega\subset \mathbb{R}^4$ any smooth bounded domain. Let $u\in C^1_0(\overline{\Omega})$, with $u>0$ in $\Omega$, be a classical solution of (\ref{general}). Suppose that one of the following holds:
\begin{enumerate}
\item[(i)] $g\in C^\infty$ and $u$ is a semistable solution.
\item[(ii)] $g\in C^1$, $g(0)>0$ and $u$ is a minimal positive solution (i.e., $u$ is the only solution of (\ref{general}) in the set $\left\{ w\in C^1_0(\overline{\Omega}):\, 0\leq w\leq u\right\}$).
\end{enumerate}
Then, there exists a universal constant $C$ (in particular, independent of $g$, $\Omega$, and $u$) such that
$$\Vert u\Vert_{L^\infty (\Omega)}\leq C \Vert \nabla u\Vert_{L^4 (\Omega)}.$$
\end{proposition}
\begin{proof} Applying Theorem \ref{desigualdades} and Lemma \ref{C1} with $N=4$, we can assert that
$$
\Vert u\Vert_{L^\infty (\Omega)}\leq t+\frac{K}{t}\left(\int_{u<t}\vert\nabla u\vert^4 \right)^{1/2}\leq t+\frac{K}{t}\left(\int_\Omega\vert\nabla u\vert^4 \right)^{1/2}, \, \forall t>0.
$$
Taking $\displaystyle{t=\left(\int_\Omega\vert\nabla u\vert^4 \right)^{1/4}}$ in this expression we obtain
$$
\Vert u\Vert_{L^\infty (\Omega)}\leq\left(\int_\Omega\vert\nabla u\vert^4 \right)^{1/4}+\frac{K}{\displaystyle{\left(\int_\Omega\vert\nabla u\vert^4 \right)^{1/4}}}\left(\int_\Omega\vert\nabla u\vert^4 \right)^{1/2}$$
$$=\left(1+K\right)\Vert \nabla u\Vert_{L^4 (\Omega)},$$
\
\noindent and the proposition follows with $C=1+K$.
\end{proof}
\begin{remark}\label{C} From classical embedding results of Sobolev spaces, it is well-known that, for a smooth bounded domain $\Omega\subset \mathbb{R}^4$, we have the continuous inclusions $W^{1,4}\subset L^p$, for every $1\leq p<\infty$, and $W^{1,4+\varepsilon}\subset L^\infty$, for every $\varepsilon >0$. On the other hand it is also well-known that $W^{1,4}\not\subset L^\infty$, which is equivalent to the unboundedness of the quotients $\Vert u\Vert_{L^\infty (\Omega)}/\Vert \nabla u\Vert_{L^4 (\Omega)}$, $u\in C^1_0(\overline{\Omega})$, with $u>0$ in $\Omega$. The previous proposition asserts that, under some stability hypothesis on $u$, these quotients are bounded.
\end{remark}
\noindent {\bf Proof of Theorem \ref{N=4}.} It is well-known that, for every smooth domain $\Omega\subset\mathbb{R}^4$, we have the continuous inclusion $W^{2,2}(\Omega)\subset W^{1,4}(\Omega)$. Thus, there exists a constant $C_1=C_1(\Omega)$, depending only on $\Omega$, such that
\begin{equation}\label{sobolev}\Vert u\Vert_{W^{1,4}(\Omega)}\leq C_1\Vert u\Vert_{W^{2,2}(\Omega)}\, , \ \ \ \ \ \forall u\in W^{2,2}(\Omega).\end{equation}
On the other hand, by elliptic regularity (see \cite{ADN}), there exists a constant $C_2=C_2(\Omega)$, depending only on $\Omega$, such that
\begin{equation}\label{regularity}\Vert u_\lambda\Vert_{W^{2,2}(\Omega)}\leq C_2\Vert \lambda f(u_\lambda) \Vert_{L^2(\Omega)}.\end{equation}
Applying (\ref{sobolev}), (\ref{regularity}), Lemma \ref{est} and Proposition \ref{des} (part (ii), with $g=\lambda f$), we deduce, for every $\lambda\in (0,\lambda^\ast)$, that
$$\Vert u_\lambda\Vert_{L^\infty}\leq C \Vert \nabla u_\lambda\Vert_{L^4}\leq C\Vert u_\lambda\Vert_{W^{1,4}}\leq C C_1 \Vert u_\lambda\Vert_{W^{2,2}}\leq C C_1 C_2\Vert \lambda f(u_\lambda) \Vert_{L^2}$$
$$\leq C C_1 C_2 \lambda^\ast \left( \int_{u_\lambda\leq 1} f(u_\lambda)^2+\int_{u_\lambda >1} f(u_\lambda)^2\right)^{1/2}$$
$$\leq C C_1 C_2 \lambda^\ast \left( f(1)^2\vert \Omega \vert+\int_{u_\lambda>1} \frac{f(u_\lambda)^2}{u_\lambda}u_\lambda\right)^{1/2}$$
$$\leq C C_1 C_2 \lambda^\ast \left( f(1)^2\vert \Omega \vert+M \Vert u_\lambda\Vert_{L^\infty} \right)^{1/2}.$$
\
Therefore $\Vert u_\lambda\Vert_{L^\infty}^2\leq A +B\Vert u_\lambda\Vert_{L^\infty}$, for certain constants $A$ and $B$ depending on $f$ and $\Omega$, but not on $\lambda\in (0,\lambda^\ast)$. We conclude that $\Vert u_\lambda\Vert_{L^\infty}$ is uniformly bounded in $\lambda\in(0,\lambda^\ast)$, and finally, taking limit $\lambda \to \lambda^\ast$, that $u^\ast \in L^\infty (\Omega)$. \qed
\section{The case $N\geq 5$}\label{N>>4}
\noindent {\bf Proof of Theorem \ref{N>4}.} Let $N\geq 5$. It is well-known that, for every smooth domain $\Omega\subset\mathbb{R}^N$ and exponent $1<p<N/2$, we have the continuous inclusion $W^{2,p}(\Omega)\subset L^\frac{Np}{N-2p}(\Omega)$. Thus, taking $p=N/(N-2)$, there exists a constant $C_3=C_3(\Omega)$, depending only on $\Omega$, such that
\begin{equation}\label{sobolevN}\Vert u\Vert_{L^\frac{N}{N-4}(\Omega)}\leq C_3\Vert u\Vert_{W^{2,\frac{N}{N-2}}(\Omega)}\, , \ \ \ \ \ \forall u\in W^{2,\frac{N}{N-2}}(\Omega).\end{equation}
On the other hand, by elliptic regularity (see \cite{ADN}), there exists a constant $C_4=C_4(\Omega)$, depending only on $\Omega$, such that
\begin{equation}\label{regularityN}\Vert u_\lambda\Vert_{W^{2,\frac{N}{N-2}}(\Omega)}\leq C_4\Vert \lambda f(u_\lambda) \Vert_{L^\frac{N}{N-2}(\Omega)}.\end{equation}
Applying (\ref{sobolevN}), (\ref{regularityN}), Lemma \ref{est} and H{\"o}lder inequality, we deduce, for every $\lambda\in (0,\lambda^\ast)$, that
$$\Vert u_\lambda\Vert_{W^{2,\frac{N}{N-2}}(\Omega)}\leq C_4 \Vert \lambda f(u_\lambda) \Vert_{L^\frac{N}{N-2}(\Omega)}$$
$$\leq C_4 \lambda^\ast \left( \int_{u_\lambda\leq 1} f(u_\lambda)^\frac{N}{N-2}+\int_{u_\lambda >1} f(u_\lambda)^\frac{N}{N-2}\right)^\frac{N-2}{N}$$
$$\leq C_4 \lambda^\ast \left( f(1)^\frac{N}{N-2}\vert \Omega \vert+\int_{u_\lambda>1} \left(\frac{f(u_\lambda)^2}{u_\lambda}\right)^\frac{N}{2(N-2)}u_\lambda^\frac{N}{2(N-2)}\right)^\frac{N-2}{N}$$
$\leq C_4 \lambda^\ast \left( f(1)^\frac{N}{N-2}\vert \Omega \vert+\left\Vert\left(\frac{f(u_\lambda)^2}{u_\lambda}\right)^\frac{N}{2(N-2)}\right\Vert_{L^\frac{2(N-2)}{N}\left(\left\{u_\lambda>1\right\}\right)} \right.$
$ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \left. \times\left\Vert u_\lambda^\frac{N}{2(N-2)}\right\Vert_{L^\frac{2(N-2)}{N-4}\left(\left\{u_\lambda>1\right\}\right)}\right)^\frac{N-2}{N}$
$$\leq C_4 \lambda^\ast \left( f(1)^\frac{N}{N-2}\vert \Omega \vert+M^\frac{N}{2(N-2)}\Vert u_\lambda\Vert_{L^\frac{N}{N-4}(\Omega)}^\frac{N}{2(N-2)}\right)^\frac{N-2}{N}$$
$$\leq C_4 \lambda^\ast \left( f(1)^\frac{N}{N-2}\vert \Omega \vert+M^\frac{N}{2(N-2)}\left( C_3 \Vert u_\lambda\Vert_{W^{2,\frac{N}{N-2}}(\Omega)}\right)^\frac{N}{2(N-2)}\right)^\frac{N-2}{N}.$$
\
Therefore $\Vert u_\lambda\Vert_{W^{2,\frac{N}{N-2}}(\Omega)}^\frac{N}{N-2}\leq A +B\Vert u_\lambda\Vert_{W^{2,\frac{N}{N-2}}(\Omega)}^\frac{N}{2(N-2)}$, for certain constants $A$ and $B$ depending on $f$ and $\Omega$, but not on $\lambda\in (0,\lambda^\ast)$. It follows that $\displaystyle{\Vert u_\lambda\Vert_{W^{2,\frac{N}{N-2}}(\Omega)}}$ is uniformly bounded in $\lambda\in(0,\lambda^\ast)$ and, taking into account the previous inequalities, $\displaystyle{\Vert f(u_\lambda)\Vert_{L^\frac{N}{N-2}(\Omega)}}$ is also uniformly bounded in $\lambda\in(0,\lambda^\ast)$. Therefore, taking limit $\lambda \to \lambda^\ast$, we deduce that $u^\ast \in W^{2,\frac{N}{N-2}}(\Omega)$ and $f(u^\ast) \in L^\frac{N}{N-2}(\Omega)$.
Finally, since $N\geq 5$, we have that $\displaystyle{W^{2,\frac{N}{N-2}}(\Omega)\subset W^{1,\frac{N}{N-3}}(\Omega)\subset L^\frac{N}{N-4}(\Omega)}$ and we conclude i) and ii). \qed
\section{Some new $W^{1,q}$ and $W^{2,q}$ estimates}\label{sobolevv}
\begin{proposition}\label{fijate}
Let $N\geq 5$, $f$ be a function satisfying (\ref{convexa}) and $\Omega\subset \mathbb{R}^N$ be a smooth bounded domain. Let $u^\ast$ be the extremal
solution of ($P_\lambda$). Suppose that $u^\ast \in L^p(\Omega)$ for some $p\in (1,\infty)$. Then $f(u^\ast)\in L^\frac{2p}{p+1}(\Omega)$ and $u^\ast \in W^{2,\frac{2p}{p+1}}(\Omega)\subset W^{1,\frac{2pN}{(p+1)N-2p}}(\Omega)$.
\end{proposition}
\begin{proof} Applying Lemma \ref{est} and H{\"o}lder inequality, we deduce, for every $\lambda\in (0,\lambda^\ast)$, that
$$\Vert f(u_\lambda) \Vert_{L^\frac{2p}{p+1}(\Omega)}^\frac{2p}{p+1}=\int_{u_\lambda\leq 1} f(u_\lambda)^\frac{2p}{p+1}+\int_{u_\lambda >1}\left(\frac{f(u_\lambda)^2}{u_\lambda}\right)^\frac{p}{p+1}u_\lambda^\frac{p}{p+1}$$
$$\leq f(1)^\frac{2p}{p+1}\vert \Omega \vert+\left\Vert\left(\frac{f(u_\lambda)^2}{u_\lambda}\right)^\frac{p}{p+1}\right\Vert_{L^\frac{p+1}{p}\left(\left\{u_\lambda>1\right\}\right)}\left\Vert u_\lambda^\frac{p}{p+1}\right\Vert_{L^{p+1}\left(\left\{u_\lambda>1\right\}\right)}$$
$$\leq f(1)^\frac{2p}{p+1}\vert \Omega \vert+M^\frac{p}{p+1}\Vert u_\lambda\Vert_{L^p(\Omega)}^\frac{p}{p+1}\leq f(1)^\frac{2p}{p+1}\vert \Omega \vert+M^\frac{p}{p+1}\Vert u^\ast\Vert_{L^p(\Omega)}^\frac{p}{p+1}.$$
Letting $\lambda\uparrow\lambda^\ast$ and using the monotone convergence Theorem, we deduce that $f(u^\ast)\in L^\frac{2p}{p+1}(\Omega)$.
On the other hand, by elliptic regularity (\cite{ADN}), there exists $C_5=C_5(p,\Omega)$, depending only on $p$ and $\Omega$, such that
$$\Vert u_\lambda\Vert_{W^{2,\frac{2p}{p+1}}(\Omega)}\leq C_5 \left\Vert \lambda f(u_\lambda)\right\Vert_{L^\frac{2p}{p+1}(\Omega)}\leq C_5 \lambda^\ast \left\Vert f(u^\ast)\right\Vert_{L^\frac{2p}{p+1}(\Omega)}, \forall \lambda\in (0,\lambda^\ast).$$
Hence $\Vert u_\lambda\Vert_{W^{2,\frac{2p}{p+1}}(\Omega)}$ is uniformly bounded in $\lambda\in (0,\lambda^\ast)$. We conclude that $u^\ast \in W^{2,\frac{2p}{p+1}}(\Omega)\subset W^{1,\frac{2pN}{(p+1)N-2p}}(\Omega)$.
\end{proof}
\begin{corollary}\label{mirapordonde}
Let $5\leq N\leq 7$, $f$ be a function satisfying (\ref{convexa}) and $\Omega\subset \mathbb{R}^N$ be a convex smooth bounded domain. Let $u^\ast$ be the extremal
solution of ($P_\lambda$). Then $u^\ast \in W_0^{1,\frac{4N}{3N-8}}(\Omega)$.
\end{corollary}
\begin{proof} As we have mentioned in the Introduction, by a result of Cabr\'e and Sanch\'on \cite{casa}, if $\Omega$ is convex then $u^\ast\in L^\frac{2N}{N-4}(\Omega)$. Applying Proposition \ref{fijate} with $p=2N/(N-4)$, we obtain $u^\ast \in W^{2,\frac{4N}{3N-4}}(\Omega)\subset W^{1,\frac{4N}{3N-8}}(\Omega)$ and the corollary follows.
\end{proof}
\begin{remark} In the proof of the previous corollary we have not used that $N\leq 7$. It is immediate that $4N/(3N-8)\leq 2$ if and only if $N\geq 8$. Since $u^\ast \in H_0^1(\Omega)$ for convex smooth domains then the previous result has no interest for dimensions $N\geq 8$ and we prefer to state it in dimensions $5\leq N\leq 7$.
\end{remark}
\begin{corollary}\label{L3}
Let $N\geq 7$, $f$ be a function satisfying (\ref{convexa}) and $\Omega\subset \mathbb{R}^N$ be a smooth bounded domain. Let $u^\ast$ be the extremal
solution of ($P_\lambda$). Suppose that $u^\ast \in L^3(\Omega)$. Then $u^\ast \in H_0^1(\Omega)$.
\end{corollary}
\begin{proof} Applying Proposition \ref{fijate} we deduce that $f(u^\ast)\in L^\frac{3}{2}(\Omega)$. By H{\"o}lder inequality we obtain $u^\ast f(u^\ast)\in L^1(\Omega)$. It follows, for every $\lambda\in (0,\lambda^\ast)$, that
$$ \int_\Omega \vert \nabla u_\lambda \vert^2=\lambda\int_\Omega u_\lambda f(u_\lambda)\leq\lambda^\ast\int_\Omega u^\ast f(u^\ast).$$
Hence $\Vert u_\lambda\Vert_{H_0^1(\Omega)}$ is uniformly bounded in $\lambda\in (0,\lambda^\ast)$. We conclude that $u^\ast \in H_0^1(\Omega)$.
\end{proof}
|
{
"timestamp": "2012-06-28T02:03:09",
"yymm": "1206",
"arxiv_id": "1206.6233",
"language": "en",
"url": "https://arxiv.org/abs/1206.6233"
}
|
\section{The {\sc Xenon1T}\ Detector}
\label{sec:xeton}
Building upon the success of the {\sc Xenon}\ detectors thus far, we will develop and deploy the next generation of detector in the program - {\sc Xenon1T}.
The {\sc Xenon1T}\ instrument can be realized by essentially scaling up the existing {\sc Xenon100}\ detector by about a factor of 10 and reducing the background by a factor of 100. This was successfully achieved in going from {\sc Xenon10}~\cite{Angle:2008} to {\sc Xenon100}~\cite{Aprile:2011instr, Aprile:2011Run08}. Employing increased levels of self-shielding and identical techniques for particle discrimination, the technologies required are largely already proven, and ongoing R$\&$D efforts address those that are not immediately transferable from {\sc Xenon100}.
The {\sc Xenon1T}\ detector is a dual-phase TPC containing 2.2~ton of pure LXe instrumented with PMTs for the simultaneous detection of scintillation light and ionization charge via proportional gas scintillation. The PMTs (Hamamatsu R11410 series) have a maximum sensitivity at the peak of the Xe scintillation emission spectrum (178 nm). Approximately 250 PMTs, 3-inch in diameter, are arranged in two closely-packed arrays: a ``bottom'' array with $\sim$120 mounted in the liquid, below the TPC drift volume, and a ``top'' array with $\sim$130 mounted in the gas above the liquid. The approximate 1.1~ton active volume is defined by a 1~m diameter cylinder that is also 1~m high, made out of PTFE (teflon) for its high reflectivity in the VUV region. Wire meshes with high optical transmission close the cylinder, and define the LXe drift volume and the gas Xe proportional region, above the liquid level. Field shaping electrodes made of copper are mounted on the outside of the PTFE cylinder and define a homogeneous electric drift field (1~kV/cm) within the active volume. As in {\sc Xenon10}\ and {\sc Xenon100}, the Xe gas is liquefied and kept at the desired temperature by PTRs, coupled directly to the inner volume.
The {\sc Xenon1T}\ experiment will be mounted in Hall B at LNGS, between the ICARUS and WArP experiments. The infrastructure consists of two main elements: a 9.6~m diameter water tank as shield and active Cherenkov muon veto with 4$\pi$ coverage for the {\sc Xenon1T}\ detector, and a service building that contains the cryogenic infrastructures and purification systems, the Xe storage/recovery system and the DAQ and control electronics. These structures will be in place for the entire duration of the project, while a mobile platform and clean room will be available during the initial assembly phase in the water shield and during maintenance operations.
Detailed simulation studies informed by results from previous {\sc Xenon}\ and other LXe detectors indicate that an increase in the light yield of {\sc Xenon1T}\ relative to {\sc Xenon100}\ is achievable by adopting relatively modest modifications to the design of the TPC such as greater coverage of non-reflective surfaces with PTFE of near unity reflectivity, greater optical transmission of electrode structures and especially greater quantum efficiency (QE) of photomultiplier tubes (PMTs) at the 178~nm wavelength of Xe scintillation. At 5~m absorption length, the predicted average light yield at 122~keV$_{\n{ee}}$ is 3~photoelectrons/keV$_{\n{ee}}$ at the nominal operating field of 1~kV/cm, corresponding to a nuclear recoil threshold below that of {\sc Xenon100}.
\begin{figure}[t]
\centering
\includegraphics[width = 1.\columnwidth]{figures/complete1.pdf}
\caption{{\bf Left:} Cross sectional drawing of the {\sc Xenon1T}\ cryostat containing the PTFE that bounds the active LXe, PMTs and support structures, field shaping rings and wire-mesh electrodes.
{\bf Right:} The cryostat will be suspended at the center of an active water shield. Here the cryostat with only the central pipe is shown in the tank with the external support structure.}
\label{fig:cryostat}
\end{figure}
\section{Background and Expected Sensitivity}
\label{back_sens}
{\sc Xenon1T}\ will rely on a number of well established and proven techniques to achieve an unprecedented low background. First: the selection of every component used in the experiment will be based on an extensive radiation screening campaign, using a variety of complementary techniques and dedicated measurements as established for {\sc Xenon100}~\cite{GATORscreening:2011,GEMPI}.
Second: the self-shielding of LXe is exploited to attenuate and moderate radiation from material components within the TPC and simultaneously a fiducial volume will be defined, thanks to the TPC event imaging capability. Additionally, the increased target mass provides powerful multiple scatter rejection, identifying background events. Third: radioactive elements within the LXe (such as from Kr or Rn contamination) are reduced to a level which makes their contribution to the background negligible~\cite{Abe:2009,TAUP:2011}. Fourth: the {\sc Xenon1T}\ detector is surrounded on all sides by a 4~m thick water shield, implemented as an active Cherenkov muon veto; this shield is very effective in reducing the neutron and $\gamma$-ray background from the underground cavern rock, or from cosmic muon-induced events to negligible levels. Finally, the TPC is designed to minimize light leakage from charge insensitive regions and events with rare topologies.
The experiment aims to reduce background from all expected sources such that the fiducial mass and the low energy threshold will allow {\sc Xenon1T}\ to reach an unprecedented sensitivity, ideally matched to probe a particularly rich region of electroweak scale parameter space, with a realistic WIMP discovery potential. With 2 years live-time and $1.1\,\n{ton}$ fiducial mass, {\sc Xenon1T}\ could detect on the order of 100 dark matter events, assuming $\sigma_{\n{SI}}$ $\sim$ $10^{-45}\,\n{cm}^2$ and for a WIMP mass of $100~\,\n{GeV/c^2}$.
With such a signal, {\sc Xenon1T}\ would be able to significantly constrain the WIMP cross-section and mass. Fig.~\ref{fig:wimp_csmass_2} (Left) shows the $1\sigma$ uncertainties for the interaction cross section as a function of WIMP mass, assuming $\sigma_{\n{SI}} = 10^{-45}\,\n{cm}^{2}$.
In the absence of a positive signal, the experiment aims to exclude cross-sections above $\sigma_{\n{SI}}\sim
2\times 10^{-47} \,\n{cm}^2$ at $90\%\,\n{CL}$ for $50~\,\n{GeV/c^2}$ WIMPs, Fig.~\ref{fig:wimp_csmass_2} (Right), such that the bulk of the theoretically favored parameter space for SUSY WIMPs can be excluded~\cite{Trotta:2008,Buchmueller:2011}.
\begin{figure}[t]
\includegraphics[width = .5\columnwidth]{figures/xs_mass_Xe_2tonyr_1E-45cm.pdf}
\includegraphics[width = .5\columnwidth]{figures/Xenon1Tprop_xslimits_3PEkeVee.pdf}
\caption{\small {\bf Left:} $1\sigma$ uncertainties in determining WIMP mass and $\sigma_{WIMP-N}$ from 2.2 ton-years with {\sc Xenon1T}. WIMP masses of 20, 50, 100, 200, 500 GeV/c$^{2}$ and $\sigma_{WIMP-N}$ of $10^{-45}\,\n{cm}^{2}$. {\bf Right:} Achieved and projected limits on $\sigma_{\n{SI}}$ from the {\sc Xenon100}\ and {\sc Xenon1T}\ detectors. For comparison, results from a selection of other experiments are shown along with the most likely parameter space for a detection as predicted by the Constrained Minimal Supersymmetric Extension of the Standard Model~\cite{Trotta:2008,Buchmueller:2011}.}
\label{fig:wimp_csmass_2}
\end{figure}
\input{referenc}
\end{document}
|
{
"timestamp": "2012-06-28T02:04:09",
"yymm": "1206",
"arxiv_id": "1206.6288",
"language": "en",
"url": "https://arxiv.org/abs/1206.6288"
}
|
\section*{Appendix: Optimization criteria, approximate predictive distributions and their derivatives}
From the definitions of various optimization criteria like NLP measure (\ref{nlp}) and (\ref{FMmooth}) etc, we see that the chosen measure and its derivatives can be obtained from the LOO predictive distributions $p(y_i|x_i,{\it S}_{\setminus{i}},\bftheta)$ and their derivatives ${{\partial p(y_i|x_i,{\it S}_{\setminus{i}},\bftheta)} \over {\partial \theta_j}}$. Here, $\theta_j$ is $j^{th}$ component of the hyperparameter vector $\bftheta$. Note that the derivative of the NLP measure (\ref{nlp}) is given by:
\begin{equation}
{{\partial G(\bftheta)} \over {\partial \theta_j}}\:=\:- {1\over n} \sum_{i=1}^n {1\over \Phi(y_i z_i)} {{\partial p(y_i|x_i,{\it S}_{\setminus{i}},\bftheta)} \over {\partial \theta_j}}
\label{nlpderv}
\end{equation}
and the derivative of the smoothed F-measure (\ref{FMmooth}) is given by:
\begin{equation}
{{\partial F_{\zeta}(\bftheta)} \over {\partial \theta_j}}\:=\:{{\eta(\bftheta) {{\partial A(\bftheta)} \over {\partial \theta_j}} - A(\bftheta) (1-\zeta) {{\partial m_+(\bftheta)} \over {\partial \theta_j}}} \over {\eta^2(\bftheta)}}
\label{FMoothderv}
\end{equation}
where $\eta(\bftheta)\:=\:\zeta n_{+} + (1-\zeta)m_{+}(\bftheta)$. Note that the derivatives ${{\partial A(\bftheta)} \over {\partial \theta_j}}$ and ${{\partial m_+(\bftheta)} \over {\partial \theta_j}}$ are directly dependent on ${{\partial p(y_i=+1|x_i,{\it S}_{\setminus{i}},\bftheta)} \over {\partial \theta_j}}$. Now, from (\ref{Pyi}) we see that to define the LOO predictive distributions, we need the LOO mean $\mu_{\setminus{i}}$ and variance $\sigma^2_{\setminus{i}}$. In the case of EP approximation, analytical expressions to compute these quantities are already available (see eqn. (\ref{cavmu})). Next, we give details on how the derivatives of predictive distributions can be obtained with these approximations.
\subsection*{Derivatives of Predictive Distributions}
For ease of reference we recall (\ref{Pyi}) here. $$p(y_i|x_i,{\it S}_{\setminus{i}},\bftheta)\:=\:\Phi \Bigl({{y_i(\mu_{\setminus{i}}+\gamma)} \over {\sqrt{1+\sigma^2_{\setminus{i}}}}}\Bigr).$$ Then, with $z_i\:=\:{{\mu_{\setminus{i}}+\gamma} \over {\sqrt{1+\sigma^2_{\setminus{i}}}}}$ and $N(z_i)\:=\:{1 \over \sqrt{2\pi}} \exp(-{{z^2_i} \over 2})$ we have $${{\partial p(y_i|{\bf x}_i,{\it S}_{\setminus{i}},\bftheta)} \over {\partial \theta_j}}\:=\:{{{\it N}(z_i) y_i} \over {\sqrt{1+\sigma^2_{\setminus{i}}}}}\Bigl({{\partial \mu_{\setminus{i}}}\over {\partial \theta_j}}\:-\:{1\over 2}{{z_i} \over {\sqrt{1+\sigma^2_{\setminus{i}}}}} {{\partial \sigma^2_{\setminus{i}}} \over {\partial \theta_j}}\Bigr).$$ Here, $\theta_j$ represents any element of $\bftheta$ other than $\gamma$. Similarly, we have
$${{\partial p(y_i|{\bf x}_i,{\it S}_{\setminus{i}},\bftheta)} \over {\partial \gamma}}\:=\:{{{\it N}(z_i) y_i} \over {\sqrt{1+\sigma^2_{\setminus{i}}}}}.$$
Thus, we need ${{\partial \mu_{\setminus{i}}}\over {\partial \theta_j}}$ and ${{\partial \sigma^2_{\setminus{i}}} \over {\partial \theta_j}}$. Below, we give details on how they can be obtained with the EP approximation.
\subsection*{Derivatives of LOO mean and variance with fixed site parameters: EP Approximation}
In the case of EP approximation, since we resort to EM type optimization, we derive expressions for ${{\partial \mu_{\setminus{i}}} \over {\partial \theta_j}}$ and ${{\partial \sigma^2_{\setminus{i}}} \over {\partial \theta_j}}$ assuming that the site parameters are fixed. From $\mu_{\setminus{i}}\:=\:\sigma^2_{\setminus{i}}\bigl({m_i \over {\bf C}_{ii}}-{\mu_i \over \sigma^2_i}\bigr)$, we have
$${{\partial \mu_{\setminus{i}}} \over {\partial \theta_j}}\:=\:{{\mu_{\setminus{i}}} \over {{\sigma^2}_{\setminus{i}}}} {{\partial \sigma^2_{\setminus{i}}} \over {\partial \theta_j}}\:+\:{{\sigma^2_{\setminus{i}}} \over {({\bf C}_{ii})^2}} \Bigl({\bf C}_{ii}{{\partial {\bf m}_i} \over {\partial \theta_j}}\:-\:{\bf m}_i {{\partial {\bf C}_{ii}} \over {\partial \theta_j}}\Bigr).$$ From $\sigma^2_{\setminus{i}}\:=\:(({\bf C}_{ii})^{-1}\:-\:\sigma^{-2}_i)^{-1}$, we have
$${{\partial \sigma^2_{\setminus{i}}} \over {\partial \theta_j}}\:=\: {{\sigma^4_{\setminus{i}}} \over {({\bf C}_{ii})^2}} {{\partial {\bf C}_{ii}} \over {\partial \theta_j}}.$$ Since ${\bf m}\:=\:{\bf C}{\bfSigma^{-1}}{\bfmu}$, we have ${{\partial {\bf m}} \over {\partial \theta_j}}\:=\:{{\partial {\bf C}} \over {\partial \theta_j}} {\bfSigma^{-1}}{\bfmu}$. Note that ${\bf C}$ can be re-written using Sherman-Morrison-Woodbury formula as: ${\bf C}\:=\:{\bf K}\:-\:{\bf K}({\bf K}\:+\:\bfSigma)^{-1}{\bf K}$ and it is useful to work with this expression to achieve improved numerical stability \citep{Raswil:06} as it avoids inversion of ${\bf K}$. Then we have
$${{\partial {\bf C}} \over {\partial \theta_j}}=\:({\bf I}\:-\:({\bf K}+\bfSigma)^{-1}{\bf K})^T{{\partial {\bf K}} \over {\partial \theta_j}}({\bf I}\:-\:({\bf K}+\bfSigma)^{-1}{\bf K}).$$ Note that ${{\partial {\bf C}_{ii}} \over {\partial \theta_j}}$, $i \in {\tilde I}$ (where ${\tilde I}=\{1,2,\ldots,n\}$) are nothing but the diagonal entries of the above expression. Note also that ${{\partial {\bf m}} \over {\partial \theta_j}}$ can be efficiently computed by taking advantage of the presence of the vector ${\bfSigma^{-1}}{\bfmu}$. But, to compute ${{\partial {\bf C}_{ii}} \over {\partial \theta_j}}$, $i \in {\tilde I}$ we cannot avoid the matrix multiplication with ${{\partial {\bf K}} \over {\partial \theta_j}}$; this results in $O(n^3)$ for each $\theta_j$. Finally, it is useful to re-write $({\bf K}+\bfSigma)^{-1}\:=\:\bfSigma^{-{1\over2}}({\bf I}\:+\:\bfSigma^{-{1\over2}}{\bf K}\bfSigma^{-{1\over2}})^{-1}\bfSigma^{-{1\over2}}$ \citep{Raswil:06}.
\section{Introduction}
Gaussian process (GP) models are flexible and powerful probabilistic models that
are
used to solve classification problems in many areas of application \citep{Raswil:06}. In
the
Bayesian GP setup, latent function values and hyperparameters that are involved in modeling are integrated with chosen priors. However, the required integrals are often not analytically tractable (due to various choices of likelihoods and priors) and closed form analytic expressions are not available. Two important problems in this context are finding good approximations for integrating over the latent function variables and the hyperparameters. There have been two approaches reported in the literature. In the first approach, both the latent function variables and the hyperparameters are integrated out within some approximations. \citet{Williams:98} used Laplace approximation to integrate over the latent function variables and Hybrid Monte Carlo (HMC) to integrate over the hyperparameters. \citet{Neal:98} used Gibbs sampling to integrate over the latent function variables and HMC to integrate over hyperparameters; this method is more accurate, but it is computationally expensive. In the second approach, only the latent function variables are integrated out and the hyperparameters are optimized on some well-defined objective function. This latter problem of choosing hyperparameters that define the model is essentially the Gaussian process model selection problem and in this paper we focus on this problem.
There are two commonly used approaches to address this model selection problem. They are marginal likelihood or evidence maximization and minimization of LOO-CV based average negative logarithmic predictive probability (NLP). Both these approaches are available for GP regression model selection
~\citep{Raswil:06,Sundar:01}. For GP classifier model selection, \citet{Gibbs:00} used a variational approximation method to integrate over the latent function values and estimated the hyperparameters by maximizing marginal likelihood (ML).
Laplace approximation and Expectation Propagation (EP) approximation ~\citep{Raswil:06,Seeger:03} are other methods that have been used to integrate over the latent function variables. Then, the marginal likelihood is optimized using gradient information obtained with any one of these approximations \citep{Raswil:06,Seeger:03}. \citet{Kim:06} presented an approximate Expectation-Maximization (EM) algorithm to learn the hyperparameters. In the E-step, they used EP to estimate the joint density of latent function values and in the M-step, the hyperparameters were optimized by maximizing
a variational lower bound on the marginal likelihood.
In this paper, we consider the approach of using LOO-CV based predictive distributions to address the GP classifier model selection problem. In a related work, \citet{Opper:00} used LOO error estimate to choose rough hyperparameter values by scanning a range of values. In the EP framework \citep{Minka:01}, cavity distributions directly provide LOO error estimates during training and were used to assess predictive performance and select automatic relevance determination (ARD) type hyperparameters \citep{Qi:04}. The LOO-CV based predictive distributions obtained from probabilistic least squares classifier were used in the minimization of NLP for GP classifier model selection \citep{Raswil:06}.
In practice, while measures like marginal likelihood and average negative
logarithmic
predictive probability measures are very useful, other measures like F-measure \citep{van:74} and Weighted Error Rate (WER) are also important and useful, for instance in applications like medical diagnostics, image understanding, etc, where the number of positive examples
is
much
smaller
than the number of negative examples.
Several works that use such measures for hyperparameter optimization exist in the
non-GP literature.
\citet{Hong:07} proposed
a
kernel classifier construction algorithm based on regularized orthogonal weighted least squares (ROWLS) estimation with LOO-Area Under the ROC Curve (AUC) as model selection criterion for handling imbalanced datasets. \citet{Jansche:05} proposed
the
training of
a
probabilistic classifier based on a logistic regression model by optimizing expected F-measure. Here an approximation to F-measure was made so that the F-measure is smoothed and becomes a smooth function of model weights.
\citet{Seeger:07} proposed a general framework for learning in probabilistic kernel classification models with a large or structured set of classes. He optimized the kernel parameters by minimizing the NLP over k-folds. \citet{Keerthi:06} considered the task of tuning hyperparameters in SVM models based on minimizing smooth performance validation functions like smoothed k-fold CV error.
The last three works did not use LOO-CV in their work.
This paper is aimed at addressing two issues. First, the proposed method is different from the work of \citet{Opper:00} and \citet{Qi:04} in that we optimize the hyperparameters directly in the continuous space and any standard non-linear optimization method can be used. It is also different from the LOO-CV based probabilistic LS method \citep{Raswil:06} in that we use the more accurate EP approximation than the LS approximation. Second, criteria such as F-measure and WER, which are needed for tackling imbalanced problems, have not been considered in GP classifier designs.
We define smoothed LOO-CV based measures using predictive distributions as a function of GP classifier model hyperparameters. Thus, the objective functions can be optimized using standard non-linear optimization techniques. We investigate usage of LOO-CV predictive distributions obtained from expectation propagation approximation. Actually, the proposed algorithm can also be used with Laplace approximation. However, \citet{Kuss:05} showed for binary classification problems that the EP approximation is better than the Laplace approximation. Therefore, we restrict our attention to using the EP approximation here.
We conduct experiments on two criteria: the standard average negative logarithm of predictive probability (NLP) and a smoothed version of F-measure.
On the NLP criterion we compare our method (EP-CV(NLP)) against the LOO-CV based probabilistic least squares (LS) classifier \citep{Raswil:06} and standard GP classifier doing ML maximization using EP approximation. We refer to the latter two methods as LS-CV(NLP) and EP(ML) respectively; the abbreviations in parentheses refer to the type of objective functions used. The experimental results on several real world benchmark datasets show that the proposed method is better than the LS-CV(NLP) method and is quite competitive with the EP(ML) method. On the F-measure criterion we compare the EP-CV(NLP) method with the EP-CV(FM) method. In the latter method we optimize over the F-measure instead of the NLP measure. We also compare with a two-step method\footnote{This method was suggested by an anonymous reviewer.} where, in the first step we optimize over the hyperparameters using the NLP measure and, in the second step we optimize {\it only} the {\it bias} hyperparameter using the F-measure. Experimental results demonstrate that this method is also inferior to the EP-CV(FM) method.
The
paper is organized as follows. We give a brief introduction to Gaussian process classification and ML optimization criteria in Section 2. In
Section 3 we give
a general set of smooth
LOO-CV based optimization criteria and illustrate their use with LOO-CV predictive distributions. Optimization aspects and
specific algorithm are given in Section 4. In
Section 5 we discuss related work on LOO-CV based GPC model selection. In
Section 6 we present experimental results and
then conclude
the paper in Section 7.
\section{Gaussian Process Classification}
In binary classification problems, we are given a training data set ${\it S}$ composed of $n$ input-target pairs $({\bf x}_i, y_i)$ where ${\bf x}_i \in R^D$, $y_i \in \{+1,-1\}$,
$i \in {\tilde I}$ and ${\tilde I}=\{1,2,\ldots,n\}$. The true function value at ${\bf x}_i$ is represented as a latent variable $f({\bf x}_i)$. The goal is to compute the predictive distribution of the class label $y_*$ at test location ${\bf x}_*$. In standard GPs for classification \citep{Raswil:06}, the latent variables $f({\bf x}_i)$ are modelled as random variables in a zero mean GP indexed by $\{{\bf x}_i\}$. The prior distribution of $\{{\bf f}({\bf X}_n)\}$ is a zero mean multivariate joint Gaussian, denoted as ${\it p}({\bf f})\:=\:{\mathcal N}({\bf 0},{\bf K})$, where ${\bf f}\:=\:[f({\bf x}_1),\ldots,f({\bf x}_n)]^T$, ${\bf X}_n\:=\:[{\bf x}_1,\ldots,{\bf x}_n]$ and ${\bf K}$ is the $n \times n$ covariance matrix whose $(i,j)^{th}$ element is $k({\bf x}_i,{\bf x}_j)$, denoted as ${\bf K}_{i,j}$. One of the most commonly used covariance
functions
is the squared exponential covariance function given by: $\mathrm{cov}(f({\bf x}_i),f({\bf x}_j))\:=\:k({\bf x}_i,{\bf x}_j)\:=\:\beta_0\:\exp(-{1 \over 2} \sum_{k=1}^D {{(x_{i,k}-x_{j,k})^2} \over {\beta_k}})$. Here, $\beta_0$ represents signal variance and
the remaining $\beta_k$'s represent width parameters across different input dimensions. Let $\bfbeta\:=\:[\beta_0, \beta_1, \ldots, \beta_D]$. These parameters are also known as {\it automatic relevance determination} (ARD) hyperparameters. We call this covariance function the ARD Gaussian kernel function.
Next, it is assumed that the probability over class labels as a function of ${\bf x}$ depends only on the latent function value $f({\bf x})$. For the binary classification problem, given the value of $f({\bf x})$ the probability of class label is independent of all other quantities: $p(y=+1|f({\bf x}),{\it S})\:=\:p(y=+1|f({\bf x}))$ where ${\it S}$ is the dataset.
The likelihood $p(y_i|f_i)$ can be modelled in several forms
such as a sigmoidal function or cumulative normal $\Phi(y_i f_i)$ where
$\Phi(z)\:=\:\int_{-\infty}^z {1 \over {\sqrt{2\pi}}} \exp(-{{w^2} \over 2}) dw$. Assuming that the examples are i.i.d, we have ${\it p}({\bf y}|{\bf f})\:=\:\prod_{i=1}^N p(y_i|f_i;\bfgamma)$. Here, $\bfgamma$ represents hyperparameters that characterize the likelihood. The prior and likelihood along with the hyperparameters $\bftheta \:=\:[\bfbeta, \bfgamma]$ characterize the GP model. With these modelling assumptions, we can write the
inference probability
given $\bftheta$ as:
\begin{equation}
p(y_*|{\bf x}_*,{\it S},\bftheta)\:=\: \int \:p(y_*|f_*,\bfgamma) \:p(f_*|{\it S},{\bf x}_*,\bftheta)\:\:df_*
\label{pred1}
\end{equation}
Here, the posterior predictive distribution of latent function $f_*$ is given by:
$$p(f_*|{\it S},{\bf x}_*,\bftheta)\:=\:\int\:p(f_*|{\bf x}_*,{\bf f},\bfbeta) \:p({\bf f}|{\it S},\bftheta)\:d{\bf f}.$$
where $p({\bf f}|{\it S},\bftheta) \varpropto \prod_{i=1}^N p(y_i|f_i,\bfgamma)\: p({\bf f}|{\bf X},\bfbeta)$. In a Bayesian solution, the class probability at the test point $x_*$ would be obtained by integrating over the hyperparameters weighted by their posterior probability
$$ p(y_*|x_*,{\it S})\:=\:\int \:p(y_*|x_*,{\it S},\bftheta)\:p(\bftheta|{\it S}) \:d\bftheta.$$
In general there is no closed form expression available for this integral and
it
is expensive to compute. Therefore, instead of integrating over the hyperparameters,
a single set of their values
is
usually
estimated from the dataset by optimizing various criteria as mentioned earlier and
then
used in (\ref{pred1}).
\subsection{Marginal Likelihood Maximization}
Marginal likelihood or evidence maximization \citep{Raswil:06} is commonly used to estimate the hyperparameters during model selection. The marginal likelihood is given by:
\begin{equation}
p({\bf y}|{\bf X},\bftheta)\:=\:\int\:\prod_{i=1}^N p(y_i|f_i,\bfgamma)\: p({\bf f}|{\bf X},\bfbeta)\:d{\bf f}
\label{ML}
\end{equation}
This integral cannot be calculated analytically except for a special case like GP regression with Gaussian noise. Therefore, certain approximations are needed to compute these quantities. Laplace approximation and EP approximations are two popular methods used for this purpose. To gain more insight into the quality of the Laplace and EP approximations, \citet{Kuss:05} carried out comprehensive comparisons of these approximations with (the more exact) Markov Chain Monte Carlo (MCMC) sampling approach in terms of their predictive performance and marginal likelihood estimates. They found that EP is the method to be used for approximate inference in binary GPC models, when the computational cost of MCMC is prohibitive. Hence in our study we restrict ourselves to the more accurate EP approximation given in the next subsection. With the EP approximation the hyperparameters are learnt by optimizing marginal likelihood with gradient information \citep{Raswil:06, Seeger:03} using standard non-linear optimization techniques.
\subsection{Expectation Propagation}
The EP algorithm is an iterative algorithm which is used for approximate Bayesian inference \citep{Opper:00,Minka:01}. It has been applied to GP classification \citep{Raswil:06, Seeger:03}. EP finds a Gaussian approximation $q({\bf f}|{\it S},\bftheta)\:=\:\mathcal{N}({\bf f}|{\bf m},{\bf C})$ to the posterior $p({\bf f}|{\it S},\bftheta)$ by moment matching of the approximate marginal distribution and the posterior. The mean and covariance of the approximate Gaussian are given by:
\begin{equation}
{\bf m}\:=\:{\bf C}\bfSigma^{-1}{\bfmu} \:\:\:\:\:\: {\bf C}\:=\:({\bf K}^{-1}\:+\:\bfSigma^{-1})^{-1}
\label{posmean}
\end{equation}
where
${\bfmu}\:=\:(\mu_1,\mu_2,\ldots,\mu_n)^T$ and $\bfSigma\:=\:diag(\sigma^2_1,\sigma^2_2,\ldots,\sigma^2_n)$ are called site function parameters. As per the approximation, the posterior is written in terms of the site functions $t(f_i;\mu_i,\sigma^2_i,Z_i)\:=\:Z_i {\mathcal N}(f_i|\mu_i,\sigma^2_i)$ and prior $p({\bf f}|{\bf X},\bftheta)$ as
$$ q({\bf f}|{\it S},\bftheta)\:=\:{{p({\bf f}|{\bf X},\bftheta)} \over q({\it S}|\bftheta)} \prod_{i=1}^N\:t(f_i;\mu_i,\sigma^2_i,Z_i).$$ The EP algorithm iteratively visits each site function in turn, and adjusts the site parameters to match moments of an approximation to the posterior marginals. This process requires replacement of intractable exact {\it cavity} distribution with a tractable approximation based on the site functions and is given by:
$$q_{\setminus{i}}(f_i)\:=\:\int \prod_{j\neq i} t(f_j;\mu_j,\sigma^2_j,Z_j) \:p({\bf f}|{\bf X},\bftheta)\:d{\bf f}^{\setminus{i}}$$ where $q_{\setminus{i}}(f_i) \varpropto \mathcal{N}(f_i|\mu_{\setminus{i}},\sigma^2_{\setminus{i}})$ is the approximate cavity function and is related to
the
diagonal entries of the posterior $q({\bf f}|{\it S},\bftheta)$ as:
$ q_{\setminus{i}}(f_i)t(f_i;\mu_i,\sigma^2_i,Z_i) \:\varpropto \:\mathcal{N}(f_i|m_i,{\bf C}_{ii}).$ Here,
${\bf C}_{ii}$ represents a diagonal entry of the matrix ${\bf C}$. Using Gaussian identities, the mean and variance of the cavity distribution are related to the site parameters as:
\begin{equation}
\mu_{\setminus{i}}\:=\:\sigma^2_{\setminus{i}}\bigl({m_i \over {\bf C}_{ii}}-{\mu_i \over \sigma^2_i}\bigr) \:\:\:\:\:\:\:\:\:\sigma^2_{\setminus{i}}\:=\:\bigl(({\bf C}_{ii})^{-1}-\sigma^{-2}_{i}\bigr)^{-1}
\label{cavmu}
\end{equation}
Then, EP adjusts the site parameters $\mu_i$, $\sigma^2_i$ and $Z_i$ such that the approximate posterior marginal using the exact likelihood approximates the posterior marginal based on the site function well. That is, $q_{\setminus{i}}(f_i)\:p(y_i|f_i) \simeq q_{\setminus{i}}(f_i)\:t(f_i;\mu_i,\sigma^2_i,Z_i)$. This is done by matching the zeroth, first and second moments on both sides. Thus, the EP algorithm iteratively updates site parameters until convergence.
Though there is no convergence proof,
in practice
the EP algorithm converges
in most cases. See \citet{Raswil:06} for more details.
Next, within some constant, the marginal likelihood with EP approximation \citep{Raswil:06} is given by:
\begin{equation}
\log q({\bf y}|{\bf X},\bftheta)\:=\:-{1\over2}\log|{\bf K}+\bfSigma|\:-{1\over 2}\bfmu^T({\bf K}+\bfSigma)^{-1}\bfmu\:+\:\sum_{i=1}^n\log w_i
\label{EPML}
\end{equation}
where $w_i=\Phi(z_i)\exp\Bigl({{(\mu_{\setminus{i}}-\mu_i)^2} \over {2(\sigma^2_{\setminus{i}}+\sigma^2_i)}}\Bigr)\sqrt{\sigma^2_{\setminus{i}}+\sigma^2_i}$ and $z_i={{y_i \mu_{\setminus{i}}} \over \sqrt{{1+\sigma^2_{\setminus{i}}}}}$.
The
hyperparameters are optimized using gradient expressions with standard conjugate gradient or quasi-Newton type non-linear optimization techniques.
\section{Leave-One-Out Cross Validation based Optimization Criteria}
In this section, we
give definitions of various LOO-CV based optimization criteria.
In section 4
we give details on how these measures can be optimized using standard nonlinear optimization
techniques.
The LOO predictive distributions $p(y_i|{\bf x}_i,{\it S}_{\setminus{i}},\bftheta)$, $i\in\tilde I$
play a crucial role.
Here
${\it S}_{\setminus{i}}$ represents the dataset without $i$th example.
Their exact computation is expensive. In section 4 we will also discuss how to approximate them efficiently.
\subsection{NLP Measure}
The averaged negative logarithm of predictive probability (NLP) is defined as:
\begin{equation}
G(\bftheta)\:=\:-{1\over n}\sum_{i=1}^n \:\log\:p(y_i|x_i,{\it S}_{\setminus{i}},\bftheta)
\label{nlp}
\end{equation}
This
LOO-CV based
measure is generic and has been used in the context of probabilistic LS classifiers
(see section 5)
and GP regression \citep{Raswil:06, Sundar:01}.
\subsection{Smoothed LOO Measures}
While
measures such as
marginal likelihood (\ref{EPML})
and NLP in (\ref{nlp}) are useful for normal situations,
other measures like F-measure
and WER
are important, for example,
when dealing with
imbalanced datasets.
Let us now define these measures.
Consider the binary classification problem with class labels +1 and -1. Assume that there are $n_+$ positive examples and $n_{-}$ negative examples. In general, the performance of the classifier may be evaluated using counts of data samples $\{a, b, c, d\}$ defined via the confusion matrix given in Table 1.
Let $n_{+}\:=\:a+b$ and $n_{-}\:=\:c+d$. The true positive rate (TP) is the proportion of positive data samples that were correctly classified (that is, true positives) and the false positive rate (FP) is the proportion of negative data samples that were incorrectly classified (that is, false positives) \citep{Hong:07}. These rates are given by: TP$\:=\:{a \over {a+b}}\:=\:{a \over n_{+}}$ and FP$\:=\:{c \over{c+d}}\:=\:{c \over {n_{-}}}$.
The misclassification rate is given by: MCR$\:=\:{{b+c} \over n}$. Note that the true positive rate is also known as {\it Recall} (R). {\it Precision} is another important quantity defined as: P$\:=\:{a \over {a+c}}$.
Now let us consider the imbalanced data case and assume that $n_{-}\gg n_{+}$. In this case if MCR is minimized then the classifier will be biased toward the negative class due to its effort in minimizing the false positives (that is, $c$) more strongly than minimizing the false negatives (that is, $b$). In the worst case almost all the positive examples will be wrongly classified, that is, $a\rightarrow 0$. This results in both $P\rightarrow 0$ and $R\rightarrow 0$. Thus MCR is not a good measure to use when the dataset is imbalanced. This problem can be addressed by optimizing other measures that we discuss next.
The F-measure is one such measure and is defined \citep{van:74} as:
$$F_{\zeta}(P,R)\:=\:\Bigl({\zeta \over R}+{{1-\zeta}\over P}\Bigr)^{-1}$$
\begin{table}
\caption{Confusion Matrix for Binary classification}
\vskip 0.05in
\begin{small}
\begin{center}
\begin{tabular}{|l|c|c|} \hline
& Positive (Predicted) & Negative (Predicted) \\ \hline
Positive (Actual) & a & b \\ \hline
Negative (Actual) & c & d \\ \hline
\end{tabular}
\end{center}
\end{small}
\end{table}
where $0\le \zeta \le 1$ and $0\le F_{\zeta}(P,R)\le 1$. It has been used in various applications like document retrieval \citep{van:74} and text classification \citep{Joachims:05}. It is particularly preferable over MCR when the dataset is highly imbalanced \citep{Joachims:05,Jansche:05}.
Let us get into more details on the functioning of the F-measure. When $\zeta \rightarrow 0$, we get $F_{\zeta}(P,R) \rightarrow P$. Then optimizing the F-measure means we are interested {\it only} in maximizing the {\it Precision}. On the other hand, when $\zeta \rightarrow 1$, we get $F_{\zeta}(P,R) \rightarrow R$. In this case we are interested {\it only} in maximizing the {\it Recall}. The user can choose an appropriate value for $\zeta$ depending on how much importance he/she wants to give to the precision and recall. Thus, the F-measure combines precision and recall into a single optimization criterion by taking their $\zeta$-weighted harmonic mean. In the imbalanced data case mentioned above MCR minimization can potentially result in $F_{\zeta}(P,R)\rightarrow 0$. By maximizing the F-measure we can prevent the classifier from being completely biased towards the negative class. Note that $F_{\zeta}(P,R)$ can be re-written in terms of $a$, $b$ and $c$ as:
\begin{equation}
F_{\zeta}(a,b,c)\:=\:{a \over {a+\zeta b+(1-\zeta) c}}
\label{Fabcmeasure}
\end{equation}
In all our experiments we set $\zeta\:=\:0.5$. In this case, it becomes $F_{0.5}(P,R)\:=\:{{2PR} \over {P+R}}$ and can also be written as: $F_{0.5}(a,b,c)\:=\:{1 \over {1+{{b+c}\over {2a}}}}$. Then, maximizing $F_{0.5}(a,b,c)$ is equivalent to maximizing ${a} \over {b+c}$. Thus, we can maximize $F_{0.5}(a,b,c)$ by both minimizing the error (that is, $b+c$) and maximizing the true positives. The trade-off kicks-in since maximizing the true positives tends to increase the false positives.
Thus maximizing the F-measure controls both the true positives and the error appropriately. In general the F-measure summarizes a classifier's ability to identify the positive class and plays an important role in the evaluation of a binary classifier. As a criterion for optimizing hyperparameters
F-measure can be computed
on
an evaluation or validation dataset.
However, in practical situations involving small datasets\footnote{GP models are known to be particularly valuable for problems with small datasets.} it is wasteful to employ a separate evaluation set.
The
LOO-CV approach would be useful in such situations and we show next how the F-measure can be estimated with such an approach.
\citet{Hong:07} estimated $TP$ and $FP$ as:
$$\widehat{TP}\:=\:{1 \over {n_+}} \sum_{i=1}^n T({\hat y}_{\setminus{i}}y_i,y_i),$$
$$\widehat{FP}\:=\:{1 \over {n_-}} \sum_{i=1}^n F({\hat y}_{\setminus{i}}y_i,y_i).$$
Here ${\hat y}_{\setminus{i}}$ represents predicted label for $i$th sample. Therefore ${\hat y}_{\setminus{i}}y_i$ takes value $+1$ when the prediction matches with the actual label and $-1$ otherwise. $T(u,v)$ is an indicator function which is 1 if $u=1$ and $v=1$. Similarly, $F(u,v)$ is one if $u=-1$ and $v=-1$. Otherwise, these functions take zero values.
\citet{Hong:07}
used these estimates to compute $\widehat{AUC}\:=\:{{1+\widehat{TP}-\widehat{FP}} \over 2}$ as an approximation of AUC and used this criterion to select
a
subset of basis vectors in
a
kernel classifier model construction procedure for imbalanced datasets. Note that this definition of AUC is applicable only for a hard classifier (fixed non-probabilistic classifier) with binary outputs. See \citet{Hong:07} for more details. In a strict sense such a definition of AUC is not suitable for a probabilistic classifier like GP classifier that provides continuous probabilistic output.
However, we can make use of this approach of defining $TP$ and $FP$ as above to compute the quantities $a$, $b$ and $c$ that are needed to evaluate the F-measure
in (\ref{Fabcmeasure}). There are two issues associated with these estimates. The first issue is
that
these estimates are not
smooth (in fact, not even continuous)
functions
of hyperparameters. Therefore they cannot be used directly in any
approach that uses gradient-based nonlinear optimization methods to tune the hyperparameters.
Secondly, these estimates do not make use of the predictive probability values, which are particularly important when we want to take the variance also into account.
In non-GP contexts
\citet{Jansche:05} and \citet{Keerthi:06} addressed the first issue by defining smoothed F-measure or
other
validation functions by replacing the indicator function with a sigmoid
function,
which makes the optimization criterion as a
smooth
function of hyperparameters. However, they did not consider
a
LOO approach
and used a validation set instead.
\citet{Jansche:05} considered maximum {\it a posterior} probabilities and \citet{Keerthi:06} used sigmoidal approximations for SVM models. Here, we propose to combine LOO based estimation and
a smoothed version of the quantities $\{a, b, c, d\}$
denoted as $A(\bftheta)$, $B(\bftheta)$, $C(\bftheta)$ and $D(\bftheta)$.
We can set
\begin{equation}
A(\bftheta)\:=\: \sum_{i:y_i=+1}\:p(y_i=+1|x_i,{\it S}_{\setminus{i}},\bftheta)
\label{Atheta}
\end{equation}
Since $n_{+}=a+b$, we can write $B(\bftheta)\:=\:n_{+}-A(\bftheta)$. With $m_{+}$ denoting the number of examples predicted as positive, we can parameterize it as $m_{+}(\bftheta)\:=\:A(\bftheta)+C(\bftheta)$. This can be rewritten as:
\begin{equation}
m_{+}(\bftheta)\:=\:\sum_{i=1}^n p(y_i=+1|x_i,{\it S}_{\setminus{i}},\bftheta)
\label{Mtheta}
\end{equation}
Thus, the smoothed F-measure can be defined from (\ref{Fabcmeasure}) as:
\begin{equation}
F_{\zeta}(\bftheta)\:=\:{{A(\bftheta)} \over {\zeta n_{+} + (1-\zeta) m_{+}(\bftheta)}}
\label{FMmooth}
\end{equation}
Note that $D(\bftheta)$ can be defined in a similar fashion as $m_{-}(\bftheta)\:=\:B(\bftheta)+D(\bftheta)$. Using these quantities, other derived quantities like $TP(\bftheta)$ and $FP(\bftheta)$ can be defined as LOO based estimates. Then, smoothed LOO estimates of WER can be obtained as shown below.
The WER measure is another useful measure for imbalanced datasets. Using the quantities defined above, its smoothed version can be written as:
\begin{equation}
WER(\bftheta;\tau)\:=\:{{n_{+}(1-TP(\bftheta))+\tau n_{-} FP(\bftheta)} \over {n_{+}+\tau n_{-}}}.
\label{WER}
\end{equation}
where $\tau$ is the ratio of the cost of mis-classifications of the negative class to that of the positive class and $0\le \tau \le 1$. Thus by choosing a suitable $\tau$ value for a given problem and optimizing over the hyperparameters we can design classifiers without becoming biased toward one class. Note that for ease of notation we have omitted the hat on $TP(\cdot)$ and $FP(\cdot)$.
Following the work of \citet{Hong:07} one can also define
\begin{equation}
AUC(\bftheta)\:=\:{{1+TP(\bftheta)-FP(\bftheta)} \over 2}
\label{AUC}
\end{equation}
and optimize over the hyperparameters. As mentioned earlier, such a definition is not suitable for the GP classifier. Nevertheless it is interesting to note that it has the desirable property of trading-off between high TP and low FP. Also, on comparing this definition of AUC with (\ref{WER}) we see that they are related in the sense that maximizing AUC is same as minimizing WER when $\tau=1$ {\it and} $n_{+}\:=\:n_{-}$.
Overall we see that the LOO-CV predictive distributions can be used to define various criteria that are
smooth
functions of hyperparameters resulting in smoothed LOO-CV measures. Now given that the LOO-CV predictive distributions are readily available from the EP algorithm, we can optimize the various smoothed LOO-CV measures directly using standard non-linear optimization techniques.
\section{EP-CV Algorithm for Choosing Hyperparameters}
Various criteria
such as
(\ref{nlp}), (\ref{FMmooth}), (\ref{WER}) and (\ref{AUC})
depend
on the hyperparameters $\bftheta$ via the predictive distributions
$p(y_i|x_i,{\it S}_{\setminus{i}},\bftheta)$.
With cumulative Gaussian likelihood, they can be written as:
\begin{equation}
p(y_i|x_i,{\it S}_{\setminus{i}},\bftheta)\:=\:\Phi\Bigl({{y_i(\mu_{\setminus{i}}+\gamma)} \over {\sqrt{1+\sigma^2_{\setminus{i}}}}}\Bigr)
\label{Pyi}
\end{equation}
Note that (\ref{Pyi}) is obtained from (\ref{pred1}) with $p(f_{i}|x_i,S_{\setminus{i}},\bftheta)=\mathcal{N}(\mu_{\setminus{i}},\sigma^2_{\setminus{i}})$. The hyperparameter $\gamma$ is referred to as the bias parameter and it helps in shifting the decision boundary associated with the probability value ${1\over2}$. In general, the bias hyperparameter $\gamma$ is very useful \citep{Raswil:06, Seeger:03} and can be optimized.
\subsection*{EP Approximation}
To compute (\ref{Pyi}) we need the LOO mean $\mu_{\setminus{i}}$ and variance
$\sigma^2_{\setminus{i}}$ $\forall i$.
With the EP approximation, they can be computed using (\ref{cavmu}).
Full details of gradient calculations needed for implementing hyperparameter optimization are given in the appendix.
We take an expectation-maximization (EM) type approach for hyperparameter optimization.
This is because gradient expressions involving implicit derivatives (with site parameters varying as a function of hyperparameters) are not available due to the iterative nature of
the EP algorithm. This approach results in the following algorithm.
\subsubsection*{\bf {EP-CV Algorithm}:}
\begin{enumerate}
\item Initialize the hyperparameters $\bftheta.$
\item {\bf Perform E-Step}: Given the hyperparameters, we find the site parameters ${\bfmu}$ and $\bfSigma$ and the posterior $q({\bf f}|{\it S},\bftheta)\:=\:\mathcal{N}({\bf m},{\bf C})$ using the EP algorithm.
\item {\bf Perform M-Step}: Find the hyperparameters $\bftheta$ by optimizing over any LOO-CV based measure like (\ref{nlp}), (\ref{FMmooth}), (\ref{WER}) or (\ref{AUC}) using any standard gradient based optimization technique. We carry out just one line search in this optimization process. During this line search as the hyperparameters change, we perform the following sequence of operations.
\begin{enumerate}
\item Compute the posterior mean ${\bf m}$ and covariance ${\bf C}$ using (\ref{posmean}).
\item Compute the LOO mean $\mu_{\setminus{i}}$ and variance $\sigma^2_{\setminus{i}}$ using (\ref{cavmu}).
\item Compute the chosen objective function like (\ref{nlp}), (\ref{FMmooth}), (\ref{WER}) or (\ref{AUC}) and its derivatives.
\end{enumerate}
Note that throughout this M-step, it is assumed that the site parameters are fixed and the values obtained from step (2) are used.
\item Repeat steps 2-3 until there is no significant change in the objective function value.
\end{enumerate}
This algorithm
worked
well in our experiments.
A similar
EM approach was used by \citet{Kim:06} (which they called EM-EP algorithm) in the optimization of a lower bound on the marginal likelihood.
\begin{figure}
{
\includegraphics[width=7cm]{car3.eps}
}
\hspace{1cm}
{
\includegraphics[width=7cm]{yeast7.eps}
}
\caption{F-Measure Optimization on Car3 and Yeast7 Datasets}
\label{fig1}
\end{figure}
Since the EP-CV algorithm optimizes the smoothed F-measure it is useful to study the behavior of the true F-measure as optimization proceeds. We do this study on two of the datasets described in Table 6 of Section 6. The optimization algorithm was terminated when there was no significant change in the smoothed F-measure value. From Figure~\ref{fig1} we see that the smoothed F-measure monotonically increases in both cases. Also as expected, the true F-measure exhibits a non-smooth behavior expected of a discrete function, and also, the values of true and smoothed F-measures are not the same. The difference arises because the smoothed F-measure is based on probabilistic scores, which can take any value between 0.5 and 1 depending on the problem (even when correct classification occurs). The important point to observe is that, in general, there is an increasing trend in the true F-measure value as the optimization progresses. In the case of the Car3 dataset (left panel) we see that clearly. A similar trend is seen in the case of the Yeast7 dataset (right panel) also, except for a small dip at the 10th iteration. Though such a behavior happens sometimes in early iterations, we observed that a better true F-measure value is almost always obtained as the optimization progresses.
\subsection*{Computational and Storage Complexities}
The computational complexity of the EP-CV algorithm depends on the
number of ARD kernel parameters $D$. See appendix for more details. For a given problem with $D$ fixed, the complexity is $O(n^3)$. This complexity is same as that of the EP(ML) method (see equation (\ref{EPML})) and LS-CV(NLP) method given in the next section.
Also in many practical problems a single global scaling hyperparameter for all the input features is sufficient. Finally, the storage complexities of all the methods are $O(n^2)$.
\section{Other LOO-CV based Methods}
Having discussed our approach in detail it is useful to recall and discuss other LOO-CV based GPC model selection methods in relation to it.
\citet{Opper:00} derived a mean field algorithm for binary classification with GPs based on the TAP approach originally proposed in
the
statistical physics of disordered systems. They showed that this approach yields an approximate LOO estimator for the generalization error.
This estimate is equivalent to the
LOO-CV
error estimate obtained from EP \citep{Minka:01}.
Instead
of optimizing over the hyperparameters,
\citet{Opper:00} used the LOO-CV error count (using indicator functions) to choose rough hyperparameter values by scanning a range of values.
\citet{Qi:04} used the LOO-CV error estimate obtained from EP to determine ARD hyperparameters. They worked with the Gaussian process classifier where each input feature is associated with a weight parameter $v_i$ with the prior $\mathcal{N}(0,{\zeta_i}^{-1})$. The hyperparameters $\zeta_i^{-1}$
were obtained by
{\it maximizing the evidence}
using a fast sequential update based on the work of \citet{Faul:02}. The outcome of this optimization is that many $\zeta_i$s would go to infinity such that only a few nonzero weights $v_i$ will be present.
Even though the ARD hyperparameters were optimized by maximizing the evidence,
to prevent
overfitting
\citet{Qi:04} proposed to select the final model as the one that gives the minimum LOO-CV error count or probability. As the LOO-CV error count is discrete, they chose the model with maximum evidence when there is a tie in the count. Compared to this approach, we work with
the
GP classifier model (without the weight parameters) detailed in Section 2 and optimize over the hyperparameters (including ARD) directly with various LOO-CV based measures (including F-measure, WER etc.) using gradient information.
In this context,
the
LOO-CV based probabilistic LS classifier \citep{Raswil:06} is a
more
direct LOO-CV based GPC model selection approach.
For the sake of completeness we give some details here and
later
compare our algorithms with this approach
in our experiments. This approach treats classification as a regression problem.
Note that the probabilistic interpretation of the LS criterion implies
a
Gaussian noise model. But the output ${\bf y}$ can take only $+1$ or $-1$ which is
slightly
odd. However, this approach is simple to implement and a probabilistic interpretation is given by passing the predictions through a sigmoid.
Specifically, the LOO mean $\mu_{\setminus{i}}$ and variance $\sigma^2_{\setminus{i}},\:i\in {\tilde I}$ are obtained from LOO-CV formulation of GP regression and the predictive distributions are obtained
via
(\ref{Pyi}). Here, $\mu_{\setminus{i}}$ and $\sigma^2_{\setminus{i}}$ are given by:
\begin{equation}
\mu_{\setminus{i}}\:=\:y_i\:-\:{\tilde \alpha}_i \sigma^2_{\setminus{i}} \:\:\:\:\:\:\sigma^2_{\setminus{i}}\:=\:{1 \over {{\bar {\bf K}}_{ii}}}
\label{lsmu}
\end{equation}
where ${\tilde \bfalp}\:=\:{\bar {\bf K}}{\bf y}$ and ${\bar {\bf K}}\:=\:({\bf K}+\lambda{\bf I})^{-1}$.
Here $\lambda$ can
either
be set to a small
positive
value or
treated as a regularization hyperparameter with
a small upper bound
constraint. This is useful when ${\bf K}$ can become ill-conditioned during optimization. Finally, the hyperparameters are optimized using
(\ref{nlp}). We call this method LS-CV(NLP).
\begin{table}
\caption{Various methods and their descriptions. All the EP-CV methods are optimized using EP-CV algorithm.}
\vskip 0.05in
\begin{small}
\begin{sc}
\begin{center}
\begin{tabular}{|l|l|} \hline
Method & Description \\ \hline
EP(ML) & Marginal likelihood maximization within EP approximation. \\
& That is, optimize (\ref{EPML}) over $\bftheta$. \\ \hline
EP-CV(NLP) & Negative Logarithmic Predictive loss minimization within EP approximation. \\
& That is, optimize (\ref{nlp}) over $\bftheta$. For ease of notation, this \\
& method is referred as {\bf NLP} in Table 7. \\ \hline
LS-CV(NLP) & Negative Logarithmic Predictive loss minimization within Least Squares \\
& approximation as described in Section 5. Optimize (\ref{nlp}) over $\bftheta$. \\ \hline
EP-CV(FM) & F-Measure maximization within EP-CV approximation. \\
& That is, optimize (\ref{FMmooth}) over $\bftheta$. For ease of notation, this \\
& method is referred as {\bf FM} in Table 7. \\ \hline
NLP-FM(BIAS) & In the first step, hyperparameters are optimized using EP-CV(NLP) method. \\
& In the second step, only the bias parameter ($\gamma$) is optimized using \\
& EP-CV(FM) method. We also refer to this method as the two-step method. \\ \hline
\end{tabular}
\end{center}
\vskip -0.2in
\end{sc}
\end{small}
\end{table}
\begin{table}
\caption{Data sets description: NLP Experiment. Here, $n$, $D$, $p$ and $nr$ represent the numbers of training examples, input dimension, test examples and train/test partitions respectively.}
\vskip 0.05in
\begin{small}
\begin{sc}
\begin{center}
\begin{tabular}{|l|l|l|l|l|} \hline
Dataset & $n$ & $D$ & $p$ & $nr$ \\ \hline
Banana & 400 & 2 & 4900 & 100 \\ \hline
Breastcancer & 200 & 9 & 77 & 100 \\ \hline
Diabetes & 468 & 8 & 300 & 100 \\ \hline
German & 700 & 20 & 300 & 100 \\ \hline
Heart & 170 & 13 & 100 & 100 \\ \hline
Image & 1300 & 18 & 1010 & 20 \\ \hline
Ringnorm & 400 & 20 & 7000 & 100 \\ \hline
Splice & 1000 & 60 & 2175 & 20 \\ \hline
Thyroid & 140 & 5 & 75 & 100 \\ \hline
Titanic & 150 & 3 & 2051 & 100 \\ \hline
Twonorm & 400 & 20 & 7000 & 100 \\ \hline
Waveform & 400 & 21 & 4600 & 100 \\ \hline
\end{tabular}
\end{center}
\vskip -0.2in
\end{sc}
\end{small}
\end{table}
\begin{table}
\begin{center}
\caption{NLP Performance}
\vskip 0.1in
\begin{small}
\begin{sc}
\begin{tabular}{|l|c|c|c|} \hline
Dataset/Method & EP(ML) & EP-CV(NLP)& LS-CV(NLP)\\ \hline
Banana & 23.90 $\pm$ 0.81 & 24.26 $\pm$ 1.06 & 33.88 $\pm$ 1.89 \\ \hline
Breastcancer & 53.57 $\pm$ 4.75 & 54.12 $\pm$ 5.27 & 55.58 $\pm$ 5.03 \\ \hline
Diabetes & 47.74 $\pm$ 1.96 & 47.97 $\pm$ 2.09 & 50.72 $\pm$ 2.13 \\ \hline
German & 48.67 $\pm$ 2.74 & 49.05 $\pm$ 2.76 & 50.51 $\pm$ 2.30 \\ \hline
Heart & 40.16 $\pm$ 5.36 & 40.03 $\pm$ 5.00 & 45.11 $\pm$ 4.91 \\ \hline
Image & 8.26 $\pm$ 1.07 & 8.45 $\pm$ 0.97 & 22.70 $\pm$ 0.66 \\ \hline
Ringnorm & 16.88 $\pm$0.93 & 16.56 $\pm$ 1.01 & 28.48 $\pm$ 0.75 \\ \hline
Solar & 57.25 $\pm$ 1.38 & 57.35 $\pm$ 1.42 & 59.61 $\pm$ 1.34 \\ \hline
Splice & 28.48 $\pm$ 0.88 & 29.60 $\pm$ 0.79 & 36.83 $\pm$ 0.42 \\ \hline
Thyroid & 10.21 $\pm$ 3.76 & 9.94 $\pm$ 3.69 & 25.33 $\pm$ 4.86 \\ \hline
Titanic & 66.86 $\pm$ 1.97 & 51.73 $\pm$ 1.73 & 53.78 $\pm$ 14.08 \\ \hline
Twonorm & 8.31 $\pm$ 0.88 & 9.08 $\pm$ 1.97 & 25.94 $\pm$ 0.53 \\ \hline
Waveform & 23.01 $\pm$ 0.89 & 22.97 $\pm$ 0.67 & 32.63 $\pm$ 0.59 \\ \hline
\end{tabular}
\end{sc}
\end{small}
\end{center}
\end{table}
\begin{table}
\begin{center}
\caption{Test Set Error Performance}
\vskip 0.1in
\begin{small}
\begin{sc}
\begin{tabular}{|l|c|c|c|} \hline
Dataset/Method & EP(ML) & EP-CV(NLP)& LS-CV(NLP)\\ \hline
Banana & 10.41 $\pm$ 0.65 & 10.51 $\pm$ 0.50 & 10.93 $\pm$ 0.67 \\ \hline
Breastcancer & 26.52 $\pm$ 4.89 & 26.61 $\pm$ 4.80 & 25.94 $\pm$ 4.59 \\ \hline
Diabetes & 23.28 $\pm$ 1.82 & 23.41 $\pm$ 1.82 & 24.30 $\pm$ 2.51 \\ \hline
German & 23.36 $\pm$ 2.11 & 23.48 $\pm$ 2.00 & 23.94 $\pm$ 2.33 \\ \hline
Heart & 16.65 $\pm$ 2.87 & 16.62 $\pm$ 3.08 & 17.91 $\pm$ 4.21 \\ \hline
Image & 2.82 $\pm$ 0.54 & 2.77 $\pm$ 0.51 & 2.74 $\pm$ 0.65 \\ \hline
Ringnorm & 4.41 $\pm$ 0.64 & 4.29 $\pm$ 0.69 & 5.05 $\pm$ 0.99 \\ \hline
Solar & 34.20 $\pm$ 1.75 & 34.27 $\pm$ 1.80 & 35.03 $\pm$ 1.89 \\ \hline
Splice & 11.61 $\pm$ 0.81 & 11.85 $\pm$ 0.83 & 11.83 $\pm$ 0.80 \\ \hline
Thyroid & 4.37 $\pm$ 2.19 & 4.20 $\pm$ 2.17 & 6.97 $\pm$ 3.78 \\ \hline
Titanic & 22.64 $\pm$ 1.34 & 22.50 $\pm$ 0.98 & 22.99 $\pm$ 2.81 \\ \hline
Twonorm & 3.05 $\pm$ 0.34 & 3.19 $\pm$ 0.51 & 3.43 $\pm$ 0.43 \\ \hline
Waveform & 10.10 $\pm$ 0.48 & 9.95 $\pm$ 0.48 & 11.70 $\pm$ 0.88 \\ \hline
\end{tabular}
\end{sc}
\end{small}
\end{center}
\end{table}
\begin{table}
\caption{Data sets description: F-Measure Experiment. Here, $n$, $D$, $p$, $nr$ and $PPE$ represent the numbers of training examples, input dimension, test examples, train/test partitions and approximate percentage of positive examples respectively.}
\vskip 0.05in
\begin{small}
\begin{sc}
\begin{center}
\begin{tabular}{|l|c|c|c|c|c|} \hline
Dataset & $n$ & $D$ & $p$ & $nr$ & $PPE$\\ \hline
Yeast7 & 297 & 8 & 1187 & 50 & 2 \\ \hline
Yeast5 & 320 & 8 & 1164 & 50 & 4 \\ \hline
Car3 & 350 & 6 & 1378 & 50 & 4 \\ \hline
Ecoli5 & 124 & 7 & 212 & 50 & 6 \\ \hline
Yeast4 & 165 & 8 & 1319 & 50 & 11 \\ \hline
Breastcancer & 200 & 9 & 77 & 100 & 29 \\ \hline
German & 700 & 20 & 300 & 100 & 30 \\ \hline
Diabetes & 468 & 8 & 300 & 100 & 35 \\ \hline
\end{tabular}
\end{center}
\vskip -0.2in
\end{sc}
\end{small}
\end{table}
\begin{table}
\begin{center}
\caption{F-Measure Performance}
\vskip 0.1in
\begin{small}
\begin{sc}
\begin{tabular}{|l|c|c|c|c|} \hline
Dataset/Method & NLP & FM & NLP-FM(bias) \\ \hline
yeast7 & 32.24 $\pm$ 15.54 & 42.58 $\pm$ 7.84 & 40.85 $\pm$ 8.59 \\ \hline
yeast5 & 19.79 $\pm$ 12.85 & 32.85 $\pm$ 8.56 & 28.45 $\pm$ 10.57 \\ \hline
car3 & 55.89 $\pm$ 9.30 & 64.35 $\pm$ 8.51 & 62.67 $\pm$ 8.49 \\ \hline
ecoli5 & 84.41 $\pm$ 6.25 & 83.79 $\pm$ 5.90 & 84.08 $\pm$ 5.86 \\ \hline
yeast4 & 71.35 $\pm$ 3.95 & 73.64 $\pm$ 2.97 & 73.23 $\pm$ 2.81 \\ \hline
BreastCancer & 38.42 $\pm$ 10.55 & 47.34 $\pm$ 6.44 & 46.98 $\pm$ 6.37 \\ \hline
German & 54.15 $\pm$ 4.17 & 57.53 $\pm$ 2.95 & 56.91 $\pm$ 2.87 \\ \hline
Diabetes & 62.69 $\pm$ 3.49 & 66.23 $\pm$ 2.87 & 66.12 $\pm$ 2.67 \\ \hline
\end{tabular}
\end{sc}
\end{small}
\end{center}
\end{table}
\section{Experiments}
We conducted two experiments with various methods. See Table 2 for a summary of the various methods. In the first experiment we compared the performance of EP-CV(NLP) method with that of EP(ML) and LS-CV(NLP) methods. In the second experiment we compared the performance of EP-CV(FM) method with that of EP-CV(NLP) and two step classifier methods. We used the {\it minimize} Matlab routine of the GPML Matlab code available at {\it http://www.gaussianprocess.org/gpml/code/matlab/doc/} for hyperparameters optimization. In all the experiments we used a single global scaling hyperparameter.
\subsection{NLP Experiment}
In this experiment we used the thirteen benchmark datasets available on the web{\footnote {\it http://ida.first.fraunhofer.de/projects/bench/benchmarks.htm}} summarized in Table 3. Let us first consider the results from the first experiment given in Table 4 and Table 5. For the EP(ML) method we used the GPML Matlab code available on the web{\footnote {\it http://www.gaussianprocess.org/gpml/code/matlab/doc/}}.
We conducted Friedman test \citep{Demsar:06} with the corresponding post-hoc tests for comparison of classifiers over multiple datasets. The comparison over multiple datasets requires a performance score of each method on each dataset \citep{Demsar:06}. Here, we consider the mean over the partitions of a given dataset as the performance score. As pointed out in \citet{Demsar:06}, it is not clear how to make use of the standard deviation information when the datasets are not independent over the partitions. The Friedman test ranks the methods for each dataset separately based on the chosen performance score (mean performance in our case). The best performing method gets the rank of 1, the second best rank 2 and so on. In case of ties, average ranks are assigned. The Friedman test checks whether the measured average ranks (over the datasets) are significantly different from the mean rank under the null hypothesis. Under the null hypothesis all the methods are equivalent and so their ranks should be equal.
In the case of NLP performance measure, the measured average ranks for the EP(ML), EP-CV(NLP) and LS-CV(NLP) methods were
$1.46$, $1.62$ and $2.92$ respectively. With three methods and 13 datasets, the F-statistic comparison at a significance level of 0.05 rejected the null hypothesis. Since the null hypothesis was rejected we conducted the Nemenyi post-hoc test for pairwise comparisons. This test revealed that the results of the EP(ML) and EP-CV(NLP) methods are better than the LS-CV(NLP) method at the significance level of $0.05$. On the other hand, the post-hoc test did not detect any significant difference in the results of EP(ML) and EP-CV(NLP) methods. In the case of test set performance measure, the measure averaged ranks for the EP(ML), EP-CV(NLP) and LS-CV(NLP) methods were $1.62$, $1.77$ and $2.62$ respectively. Note that the average rank of LS-CV method has improved on the test set error performance. Here again, the null hypothesis is rejected at the same significance level and the post-hoc test did not detect any significant difference in the results of EP(ML) and EP-CV(NLP) methods. The results of the EP(ML) and EP-CV(NLP) methods are better than the LS-CV(NLP) method at the significance level of $0.05$ and $0.1$ respectively. Thus, we can conclude that EP(ML) and EP-CV(NLP) are competitive to each other. Further, both these methods perform better than the LS-CV(NLP) method in this experiment.
We can also make other observations from the tables. We note that the NLP performance of the LS-CV(NLP) method is quite inferior on several datasets even though its test set error performances on most of these datasets (except {\it waveform and thyroid}) are relatively closer. Further, some kind of group behavior can be seen. For example, the NLP scores are high on {\it titanic}, {\it breast-cancer}, {\it diabetes}, {\it German} and {\it flare-solar}. Also, the test set errors are $>20\%$ on these datasets. Consequently, we may consider these datasets as difficult ones. From Table 4 we observe that the NLP performance of LS-CV(NLP) is {\it closer} to the other two methods on these datasets (compared to its performance on other datasets). Next we can order the remaining datasets {\it heart}, {\it splice}, {\it banana}, {\it waveform}, {\it ringnorm}, {\it thyroid}, {\it twonorm} and {\it image} in terms of descending difficulty. Note that the difference in the NLP performance seems to have an increasing trend as the dataset becomes easier. To understand this we looked at the predictive probabilities of these methods on both correctly and wrongly classified examples. In the case of thyroid dataset these average probability scores for the LS-CV(NLP) method were $0.84$ (for correct classification) and $0.35$ (wrong classification). Here, the averaging was done over all the correctly(wrongly) classified examples over all the partitions. On the other hand, the corresponding values for the EP-CV(NLP) were $(0.96, 0.39)$. In the case of banana dataset, these scores were $(0.79, 0.35)$ and $(0.92, 0.29)$ for the LS-CV(NLP) and EP-CV(NLP) methods respectively. In the case of German dataset, they were $(0.75, 0.33)$ and $(0.79, 0.32)$. The scores for the EP-ML method were very close to that of the EP-CV(NLP) method. In general, we observed that the predictive probability estimates from the LS-CV(NLP) method were relatively poor and resulted in poor NLP performance. 
We looked at the hyperparameter estimates of the different methods and observed that the LS-CV(NLP) method takes smaller width and signal variance hyperparameter values on most of the datasets except on the difficult datasets (mentioned above) compared to the other two methods. Apart from this we did not observe any specific pattern in the hyperparameter values chosen by these methods. Looking at the hyperparameter estimates of EP(ML) and EP-CV(NLP), it seems that several solutions in the space of hyperparameters that give close performances are possible.
\subsection{F-measure Experiment}
The datasets used in this experiment are described in Table 6. The datasets {\it yeast}, {\it car} and {\it ecoli} are multi-class datasets and we converted them into binary classification datasets by considering examples belonging to the class label indicated by the number (for example, $7$ in yeast7) as positive class respectively and treating the rest of the examples as negative class. These datasets are available in the web {\footnote {\it ftp://ftp.ics.uci.edu/pub/machine-learning-databases/}}. We created 50 partitions for these datasets in a stratified manner reflecting the class distributions.
Let us consider the results from the second experiment given in Table 7. In this experiment all the results were obtained within the EP-CV framework. The first and second columns represent results obtained using NLP and smoothed F-measure (i.e., eq.~(\ref{FMmooth})) as the optimization criterion respectively. The third column represents results obtained from the two step classifier described earlier. We looked at the hyperparameter estimates of the different methods. We observed that the bias estimates of the two step classifier were somewhat closer (within $10\%$) to those of the smoothed F-measure method on the {\it breast-cancer}, {\it diabetes} and {\it German} datasets. On the remaining datasets they were different by more than $40\%$. The width and signal variance hyperparameter estimates were also quite different. From Table 7, we observe that the two step method is also good and gives closer performance to the smoothed F-measure method on several datasets. Further analysis of the performance results revealed that even though the standard deviations are high, the smoothed F-measure method gave better performance than the two step method on majority of the partitions on several datasets. We believe that the larger standard deviations in the results arises from the sensitivity to the dataset with lesser number of positive examples. To carry out the statistical significance tests, again we used the mean (over the partitions) as the performance score for each of the methods. The measured average ranks for these three methods were $2.75$, $1.25$ and $2.00$ respectively. With three methods and 8 datasets, the F-statistic comparison at a significance level of 0.05 rejected the null hypothesis. The Nemenyi post-hoc test for pairwise comparisons revealed that {\it only} the results of smoothed F-measure is better than the NLP based method at the significance level of 0.05. 
In this experiment the post-hoc test did not detect any significant differences in the comparisons of smoothed F-measure method with the two step method and the two step method with the NLP method. However in these two comparisons the rank differences were closer to the required critical differences at the significance level of 0.1. We also observed that if we were to conduct Wilcoxon signed-rank test on these methods (as if we were comparing only two classifiers) then the results were statistically significant at the significance level of 0.05 for all the three pairs. In summary, the results demonstrate the usefulness of direct optimization of smoothed F-measure.
\section{Conclusion}
In this paper, we considered the problem of Gaussian process classifier model selection with different LOO-CV based optimization criteria and provided a practical algorithm using LOO predictive distributions with criteria like standard NLP, smoothed F-measure and WER to select hyperparameters. More specifically, apart from optimization of standard NLP, we demonstrated its usefulness in direct optimization of smoothed F-measure, which is useful to handle imbalanced data. We considered the predictive distribution derived from the Expectation Propagation (EP) approximation. We derived relevant expressions and proposed a very useful EP-CV algorithm. The experimental results on several real world benchmark datasets showed NLP generalization performance (with NLP optimization) comparable to that of existing approaches. We demonstrated that the smoothed F-measure optimization method is a very useful method that improves the F-measure performance significantly. Overall, the EP-CV algorithm is an excellent choice for GP classifier model selection with different LOO-CV based optimization criteria.
\vskip 0.2in
|
{
"timestamp": "2012-06-27T02:05:18",
"yymm": "1206",
"arxiv_id": "1206.6038",
"language": "en",
"url": "https://arxiv.org/abs/1206.6038"
}
|
\section{Introduction}
The compact H\,{\sc ii}\ regions residing in the Magellanic Clouds
are interesting in the context of massive star formation in these neighboring
galaxies. Typical Magellanic Cloud H\,{\sc ii}\ regions are giant complexes of ionized gas
with sizes of several arc minutes, corresponding to physical scales of more than
50\,pc and are powered by a large number of exciting stars. In contrast,
Magellanic Cloud compact H\,{\sc ii}\ regions are small
regions mostly $\sim$\,5$''$\hspace*{-.1cm}\, to 10$''$\hspace*{-.1cm}\, in diameter,
corresponding to $\sim$\,1.5 to 3.0\,pc and excited by a much smaller number
of massive stars. There are two types of compact H\,{\sc ii}\ regions,
high-excitation blobs \citep[HEBs, for a review see][]{MHM10b}
and low-excitation blobs \citep[LEBs,][]{Meynadier07}.
The members of the first group are often observed lying adjacent or projected onto
giant H\,{\sc ii}\ regions and are younger than the associated giant H\,{\sc ii}\ regions.
Do HEBs indeed belong
to the same region of the Magellanic Clouds at which the giant H\,{\sc ii}\ regions have formed or
is the association between these two types of H\,{\sc ii}\ regions a line-of-sight effect?
If they are associated, are HEBs powered by triggered, second-generation massive
stars? Why has star formation not proceeded in a single burst although
massive stars are believed to form in the dense cores of giant molecular clouds?
These are some interesting questions, the answers to which
will be helpful for better understanding massive star formation in the Magellanic Clouds.
A problem is that HEBs are not numerous, and moreover, few of them have been
studied individually in detail. \\
This paper is devoted to a first detailed study of two compact H\,{\sc ii}\ regions, one in
the Large Magellanic Cloud (LMC) H\,{\sc ii}\ region N191 and the
other in the Small Magellanic Cloud (SMC) N77
\citep[][]{Henize56}.
Among the LMC H\,{\sc ii}\ regions listed by \citet[][]{Henize56}, N191 is one of the
outermost, lying below the bar, at a distance of $\sim$\ 200$'$\hspace*{-.1cm}\ ($\sim$\ 3 kpc in projection)
from the famous 30 Doradus. N191 appears as an elongated structure,
with two components N191A and N191B in the Henize catalog.
Here we essentially study the brightest component N191A, also known as
DEM L 64b \citep[][]{Davies76}.
N77 is one of the most northern H\,{\sc ii}\ regions of the
SMC; it is situated at a distance of $\sim$\ 25$'$\hspace*{-.1cm}\ ($\sim$\ 440 pc in projection)
from the pre-eminent SMC
H\,{\sc ii}\ region N66 \citep[][and references therein]{MHM10a}.
SMC N77 is identified in the optical survey of
\citet[][]{Davies76} as DEM S 117. \\
Few works have been devoted to these two H\,{\sc ii}\ regions despite their interesting characteristics.
LMC N191 belongs to the OB association LH 23 \citep[][]{Lucke70}. It was also detected as
IRAS source 05051-7058 \citep[][]{Helou88}.
The compact H\,{\sc ii}\ region SMC N77 seems to coincide with the stellar association B-OB 24
\citep[]{Battinelli91}. It was identified in the infrared as IRAS source 01011-7209
\citep[][]{Helou88} and as source \#48 in the ISO 12\,$\mu$m catalog \citep[][]{Wilke03}.
Furthermore, LMC N191 and SMC N77 were part of a Spitzer study of compact H\,{\sc ii}\ regions
by \citet[][]{Charmandaris08} and have been included in several radio continuum surveys of
the Magellanic Clouds \citep[][]{Filipovic95,Filipovic02}.
Both compact H\,{\sc ii}\ regions are associated with molecular clouds.
The giant molecular cloud LMC N J0504-7056 is centered at 130$''$\hspace*{-.1cm}\ south of N191
\citep[][]{Fukui08}. Moreover, the OB association LH 23 and the H\,{\sc ii}\ region are related to
this molecular cloud \citep[][]{Fukui08,Kawamura09}.
A small molecular cloud has been detected near the position of SMC N77
\citep[][]{Mizuno01}. \\
This paper is arranged as follows. Section 2 presents the observations,
data reduction, and the archive data (Spitzer data, 2MASS data).
Section 3 describes our results (overall view, extinction, nebular emission,
stellar content and chemical abundances).
Section 4 presents our discussion, and finally our conclusions are summarized in Section 5.
\section{Observations and data reduction}
\subsection{NTT imaging}
LMC N191 and SMC N77 were observed on 28 September 2002 using the
ESO New Technology Telescope (NTT) equipped with the active optics and
the Superb Seeing Imager \citep[SuSI2;][]{D'Odorico98}.
The detector consisted of two CCD
chips, identified as ESO \#45 and \#46. The two resulting frames were
automatically combined to produce a single FITS file, while the space
between the two chips was ``filled'' with some overscan columns so that
the respective geometry of the two chips was approximately
preserved. The gap between the chips corresponds to $\sim$\,100 true CCD
pixels, or $\sim$\,8$''$\hspace*{-.1cm}. The file format was 4288\,$\times$\,4096
pixels, and the measured pixel size 0$''$\hspace*{-.1cm}.0805 on the sky. Each
chip of the mosaic covered a field of 5$'$\hspace*{-.1cm}.5\,$\times$\, 2$'$\hspace*{-.1cm}.7. We refer to the
ESO manual SuSI2 for more technical information. \\
Nebular imaging was carried out using the narrow-band filters centered
on the emission lines H$\alpha$\, (ESO \#884), H$\beta$\, (\#881), and
[O\,{\sc iii}]\,(\#882). N191 was observed with three exposures of 180 sec for each filter.
N77 was also observed with exposures of 180 sec: five exposures in H$\alpha$\ and three
H$\beta$\ and [O\,{\sc iii}]\ exposures. The image quality was quite good during the night,
the seeing was 0$''$\hspace*{-.1cm}.8.
We constructed the line-ratio maps H$\alpha$/H$\beta$\, and [O\,{\sc iii}]/H$\beta$\
from nebular imaging. We also took exposures using filters ESO
\#811 ($B$), \#812 ($V$), and \#813 ($R$) with unit
exposure times of 15 sec for $B$ and $V$ and 10 sec for $R$,
respectively. The exposures for each filter were repeated twice
using ditherings of 5$''$\hspace*{-.1cm}\,--10$''$\hspace*{-.1cm}\, for bad pixel
rejection. \\
PSF-fitting photometry was obtained for all filters using the DAOPHOT package under
IRAF\footnote{http://iraf.noao.fr}. The magnitudes were then calibrated using the photometric
calibration package photcal. To perform this calibration, seven standard stars,
belonging to two Landolt photometric groups SA\,92 and T\,Phe
\citep[][]{Landolt92} were observed at
four different airmasses. This led to the determination of the photometry coefficients
and zero-points. Those coefficients agree well with the indicative
values displayed on the SuSI2 web page.\\
The aperture corrections were calculated as
follows. Starting from one of the frames, we subtracted
all stars except those used for determining the PSF
with the daophot.substar procedure, using our preliminary
DAOPHOT photometry and the corresponding PSF. This led
to a frame with only a few bright, isolated stars plus residues
from the subtraction. We then performed both aperture and
PSF-fitting photometry on those stars, using the same aperture
as for standard stars. The comparison led to aperture corrections
of 0.02, 0.04, and 0.03 mag in $B$, $V$, and $R$, respectively.\\
During the photometry process, some slight discrepancies
between the intensity of the frames were found: this effect was
considered to be the consequence of episodic variations in the sky
transparency by 7$\%$ at most. To avoid introducing a systematic
underestimation of star magnitudes when averaging the frames, we decided to
perform photometry on each individual frame.\\
By cross-correlating the positions of the sources in the various photometry
files, we obtained the mean magnitude (average of the two magnitudes for each filter) and a
decent estimator of the uncertainty in this magnitude (difference between maximum
and minimum magnitudes). Finally, the process yielded the
photometry of 644 stars for the LMC N191 field and
236 stars for that of SMC N77 in all
three filters. This difference in the number of sources is partly due to the limiting magnitude
of the photometry ($\sim$\ 21 mag for LMC N191, $\sim$\ 20 mag for SMC N77).
It is better for LMC N191 than for SMC N77 because of the better sky conditions.
The results for the brightest stars
toward LMC N191 and SMC N77 are presented in Table\,\ref{tab:stars_n191}. The whole photometry is
available in electronic form.
\subsection{NTT spectroscopy}
The EMMI spectrograph \citep[][]{Dekker86}
attached to the ESO NTT telescope was used on 29
September 2002 to obtain several long-slit stellar spectra.
The grating was \#\,12 centered on 4350\,\AA\, (BLMRD mode)
and the detector was a Tektronix CCD TK1034 with 1024$^{2}$ pixels of
size 24 $\mu$m. The covered wavelength range was 3810-4740\,\AA\,
and the dispersion 38\,\AA\,mm$^{-1}$, giving {\sc fwhm}
resolutions of $2.70\pm0.10$ pixels or $2.48\pm0.13$\,\AA\, for a 1$''$\hspace*{-.1cm}.0 slit.
At each position, we took three 10-min exposures. The instrument response was derived
from observations of the calibration stars LTT\,7379, LTT\,6248, and LTT\,7987.
The seeing condition was 0$''$\hspace*{-.1cm}.8 ({\sc fwhm}). The identifications of the stars
along the slits
were based on monitor sketches drawn during the observations. \\
Furthermore, EMMI was used on 28 September 2002 to obtain nebular
spectra with gratings \#\,8 (4550-6650\,\AA) and \#\,13 (4200-8000\,\AA) in the REMD mode
and with grating \#\,4 (3650-5350\,\AA) in the BLMD mode. In the REMD mode,
the detector was CCD \#\,63,
MIT/LL, 2048\,$\times$\,4096 pixels of 15$^{2}\,\mu$m$^{2}$ each.
Spectra were obtained with the slit set in
east-west and north-south orientations using a basic exposure time
of 300 sec repeated several times. The seeing conditions varied around
0$''$\hspace*{-.1cm}.7.
Reduction and extraction of spectra were performed using the IRAF software
package. Fluxes were derived from the extracted spectra with the
IRAF task SPLOT. The line fluxes were measured by fitting
Gaussian profiles to the lines as well as by simple pixel integration in
some cases. The nebular line intensities were corrected for interstellar
reddening using the formulae given by \citet{Howarth83} for the LMC extinction,
which is very similar to that of the SMC in the visible.
The intensities of the main nebular lines
are presented in Table \ref{tab:flux} where $F(\lambda)$ and $I(\lambda)$ represent
observed and de-reddened line intensities. The uncertainties are
indicated by capital letters: A $<$\,10\%, B=10--20\%, C=20--30\%, and D
$>$\,30\%. \\
\subsection{Archive Spitzer and 2MASS data}
We used data obtained with the Infrared Array Camera (IRAC)
on board the Spitzer Space Telescope to build composite images of
LMC N191 and SMC N77 and also to carry out their photometry.
The observations of the LMC were part of the SAGE-LMC survey
(PI M. Meixner, PID = 20 203, see \citet[][]{Meixner06}), while the SMC
data belong to the S$^{3}$MC
project (PI A. Bolatto PID = 3316, see \citet[][]{Bolatto07}). \\
The typical PSF of the IRAC images in the
3.6, 4.5, 5.8, and 8.0 $\mu$m bands is 1$''$\hspace*{-.1cm}.66 to 1$''$\hspace*{-.1cm}.98.
The derived magnitudes for LMC N191 in the 3.6, 4.5, 5.8, and 8.0 $\mu$m
bands are 10.50, 9.48, 8.03, and 6.47 mag,
respectively, using an integration
aperture of 3 pixels, or 3.6$''$\hspace*{-.1cm}\, in radius
\citep{Charmandaris08}. Using the same aperture, the IRAC magnitudes for
SMC N77 in the 3.6, 4.5, 5.8, and 8.0 $\mu$m
bands are 13.86, 13.87, 12.65, and 10.64 mag \citep{Charmandaris08}. Measurements with
either slightly larger or smaller apertures do not affect the color results. \\
We also used the {\it JHK} photometry provided by the 2MASS point source catalog
(http://tdc-www.harvard.edu/catalogs/tmpsc.html), as presented in
Table\,\ref{tab:stars_n191}.
Note that the embedded stars in the H\,{\sc ii}\ region LMC N191A
(i.e. \#1, \#2, \#3, \#4 and \#5) are not resolved in 2MASS data, so
the {\it JHK} photometry of the star N191-1 corresponds to the whole N191A
compact H\,{\sc ii}\ region. The same is true for the {\it JHK} photometry
of SMC N77-1, which corresponds to the whole N77A H\,{\sc ii}\ region.
\begin{figure*}[]
\centering
\includegraphics[width=0.7\hsize]{19706fg1a.eps}
\includegraphics[width=0.7\hsize]{19706fg1b.eps}
\caption{Large Magellanic Cloud H\,{\sc ii}\ region N191.
{\it Upper panel}: Composite three-color image showing the two components
A and B. The main component, N191A, is situated at
$\alpha$\,=\,05h\,04m\,38s and $\delta$\,=\,-70$^{\circ}$\hspace*{-.1cm}\,\,54$'$\hspace*{-.1cm}\,41$''$\hspace*{-.1cm}.
This image, taken with the ESO NTT/SuSI2, is a coaddition of narrow-band
filters H$\alpha$\ (red), [O\,{\sc iii}]\ (green), and H$\beta$\ (blue).
The field size, 153$''$\hspace*{-.1cm}\ $\times$\,153$''$\hspace*{-.1cm}\, ($\sim$\ 37\,$\times$\,37 pc), is a
close up of an original image covering a field of 319$''$\hspace*{-.1cm}\,\,$\times$\,327$''$\hspace*{-.1cm}\,
(78\,$\times$\,79 pc). North is up and east to the left.
{\it Lower panel}: The same field through the broad-band filter
$V$. The brightest stars of the field and the young stellar object candidates
are labeled (Tables \ref{tab:stars_n191} and \ref{tab:phot_yso}).
}
\label{fig:n191}
\end{figure*}
\begin{figure*}[]
\centering
\includegraphics[width=0.5\hsize]{19706fg2.eps}
\caption{Composite image of the LMC N191 region obtained with Spitzer IRAC.
The 4.5 $\mu$m band is represented in blue, the 5.8 $\mu$m band in yellow, and
the 8.0 $\mu$m band in red. The field size and orientation are the same
as for Fig.\,\ref{fig:n191}.
}
\label{fig:n191_spitzer}
\end{figure*}
\begin{figure*}[]
\centering
\includegraphics[width=0.7\hsize]{19706fg3a.eps}
\includegraphics[width=0.7\hsize]{19706fg3b.eps}
\caption{
Small Magellanic Cloud H\,{\sc ii}\ region N77.
{\it Upper panel}: A composite three-color image showing the two components
A and B. The main component, N77A, is situated at
$\alpha$\,=\,01h\,02m\,49s and $\delta$\,=\,-71$^{\circ}$\hspace*{-.1cm}\,\,53$'$\hspace*{-.1cm}\,18$''$\hspace*{-.1cm}.
This image, taken with the ESO NTT/SuSI2, is the coaddition of narrow-band
filters H$\alpha$\ (red), [O\,{\sc iii}]\ (green), and H$\beta$\ (blue).
The field size, 153$''$\hspace*{-.1cm}\ $\times$\,153$''$\hspace*{-.1cm}\, ($\sim$\ 45\,$\times$\,45 pc),
is a close up of an original image covering a field of 319$''$\hspace*{-.1cm}\,\,$\times$\,327$''$\hspace*{-.1cm}\,
(93\,$\times$\,95 pc). North is up and east to the left.
{\it Lower panel}: The same field through the broad-band filter
$V$. The brightest stars of the field and the young stellar object candidates
are labeled (Tables \ref{tab:stars_n191} and \ref{tab:phot_yso}).}
\label{fig:n77}
\end{figure*}
\begin{figure*}[]
\centering
\includegraphics[width=0.5\hsize]{19706fg4.eps}
\caption{Composite image of the SMC N77 region obtained with Spitzer IRAC.
The 4.5 $\mu$m band is represented in blue, the 5.8 $\mu$m band in yellow, and
the 8.0 $\mu$m band in red. The field size is the same as for Fig.\,\ref{fig:n77}.
North is up and east to the left.}
\label{fig:n77_spitzer}
\end{figure*}
\section{Results}
\subsection{Overall view}
The images taken with the NTT telescope (Sect. 2.1) have a
whole area of $\sim$\,5$'$\hspace*{-.1cm}\,$\times$\,5$'$\hspace*{-.1cm}\, corresponding to
$\sim$\,73 pc\,$\times$\,73 pc for a distance of $\sim$\,50 kpc (LMC) or
$\sim$\,90 pc\,$\times$\,90 pc for a distance of $\sim$\,60 kpc (SMC) \citep[][]{Laney94}. \\
The LMC N191 H\,{\sc ii}\ region \citep[][]{Henize56} consists of two components A and B in
the optical, as displayed in Fig.\,\ref{fig:n191}. The brighter component A contains
a compact H\,{\sc ii}\ region on which we focus here. Component B is comparatively very diffuse.
The Spitzer observations show a richer nebulous region (Fig.\,\ref{fig:n191_spitzer})
compared to the optical, the brightest object of which is the compact H\,{\sc ii}\ region N191A.
The Spitzer observations
also uncover two relatively bright, compact nebulae situated north of component A.
These two objects correspond to star \#13 (Table\,\ref{tab:stars_n191}) and a
young stellar object (YSO)
candidate (Sect. 4). The component N191B is very weak or almost nonexistent in the Spitzer
bands. It is crossed by a curl of gas emanating from the N191A region.
The bright source, seen in the middle of the curl and called \#14,
has no noticeable optical counterpart.
It is in fact an ``extreme AGB star'', as detected in the SAGE Survey and is cataloged
as SSTISAGE1C J050426.95-705351.6 \citep[][]{Vijh09}. \\
The compact H\,{\sc ii}\ region N191A has a
mean angular radius, $(\theta_{\alpha}\,\theta_{\delta})^{1/2}$, of 5$''$\hspace*{-.1cm}.2 corresponding
to a radius of 1.2 pc. Broad-band images in $B$, $V$, and $R$ (Fig.\,\ref{fig:n191}, lower panel)
reveal seven stars (\#1 to \#7) within less than 6$''$\hspace*{-.1cm}\ of the compact H\,{\sc ii}\ region,
whose positions and photometry are listed in Table\,\ref{tab:stars_n191}.
We will show that the central star \#1 is the exciting source of the H\,{\sc ii}\
region (see Section 2.4).\\
The SMC N77 field presents a different situation in the optical for the
diffuse nebula. N77 is composed of two components
A and B (Fig.\,\ref{fig:n77}).
N77A contains two stars (\#1 and \#2) separated by only 1$''$\hspace*{-.1cm}.3.
This compact region of ionized gas appears as a sphere of radius $\sim$\ 10$''$\hspace*{-.1cm}\ ($\sim$\ 2.9 pc) split
into two lobes by a dust lane that runs along an almost east-west direction.
The two lobes intersect about 1$''$\hspace*{-.1cm}\ south of star \#2.
Stars \#1 and \#2 are situated in the northern lobe, which
is brighter in the Balmer lines H$\alpha$\ and H$\beta$\ and also in the [O\,{\sc iii}]\ line.
The H$\alpha$\ image shows faint emission around star \#4 (Fig.\, \ref{fig:n77})
situated $\sim$\ 26$''$\hspace*{-.1cm}\, southwest of N77A. This nebula is known as N77B in the Henize catalog.
The positions and photometry of stars \#1 and \#2 are listed in Table\,\ref{tab:stars_n191}.
The Spitzer observations (Fig.\,\ref{fig:n77_spitzer}) display a curved structure focused
on N77 in which the two lobes merge, but this is probably only because of
low resolution. N77B is very
dim in the Spitzer image. Fig.\,\ref{fig:n77_spitzer} also indicates three YSO
candidates that will be discussed in Sect. 4.
\subsection{Extinction}
The map of the H$\alpha$\,/\,H$\beta$\ Balmer decrement confirms that the H\,{\sc ii}\ region LMC N191 is heavily
affected by interstellar dust. The H$\alpha$\,/\,H$\beta$\ ratio is on average 7.0 (A$_{V}$ = 2.7 mag) and
up to 10.0 (A$_{V}$ = 3.8 mag) in the most extincted area.
The extinction toward star \#1 can be derived from a
second method. O-type stars have an intrinsic color of {\it B\,--\,V}\,=\,-0.28 mag
\citep[][]{Martins06}. This yields a color excess of {\it E(B\,--\,V)}\,=\,0.39 mag
or a visual extinction of A$_{V}$ = 1.2 mag. This value
is lower than the result from the Balmer decrement because
the central regions of the H\,{\sc ii}\ region are less affected by extinction.
The dust has been more dispersed along the line of sight of star \#1. Moreover, the extinction
toward LMC N191 was estimated
by a third method using radio continuum observations. N191 appears as the
source B0505-7058 in the Parkes radio continuum survey at 2.45, 4.75 and 8.55 GHz, which had
beam-sizes of
8$'$\hspace*{-.1cm}.85, 4$'$\hspace*{-.1cm}.8 and 2$'$\hspace*{-.1cm}.7, respectively \citep[][]{Filipovic95}.
The resulting extinction, A$_{V}$ = 2.4 mag, is comparable with that obtained
using the previously mentioned methods in the optical range.\\
The average value of the Balmer decrement toward the H\,{\sc ii}\ region SMC N77 is
about 3.1, corresponding to A$_{V}$ = 0.25 mag.
The most extincted part of the H\,{\sc ii}\ region is its western border, along the dust lane
(see Section 3.1), where the H$\alpha$\,/\,H$\beta$\ ratio reaches a value of 4.5 (A$_{V}$ = 1.4 mag).
We also derived the extinction from the radio observations of SMC N77.
High-resolution observations of this object were obtained by \citet[][]{Filipovic02}, who
used the Australia Telescope Compact
Array (ATCA) in radio continuum emission at 1.42, 2.37, 4.80 and 8.64 GHz with
synthesized beams of 98$''$\hspace*{-.1cm} , 40$''$\hspace*{-.1cm} , 30$''$\hspace*{-.1cm}\ and 15$''$\hspace*{-.1cm} , respectively.
The resulting extinction, A$_{V}$ = 0.56 mag, is comparable with that inferred
using the Balmer decrement.
\subsection{Nebular emission}
The total H$\beta$ fluxes of the compact H\,{\sc ii}\ regions LMC N191 and SMC N77 were derived
using the following method. First we calculated the relative H$\beta$ flux in an
imaginary 1$''$\hspace*{-.1cm}\, slit passing through the H$\beta$ image with respect to
the total flux emitted by the whole H\,{\sc ii}\ region. This value was then
compared with the absolute flux obtained from the spectra. The total H$\beta$ flux thus obtained for N191 was
$F$(H$\beta$ )\,=\,1.52\,$\times$\,10$^{-12}$ erg cm$^{-2}$ s$^{-1}$.
Studies of the extinction in the LMC and the SMC reveal
reddening laws that are similar to the average Galactic law for the optical
and near-IR wavelengths \citep[][]{Howarth83,Prevot84,Bouchet85}.
The reddening coefficient, derived from the mean H$\alpha$\,/\,H$\beta$\ ratio of 7,
was {\it c}(H$\beta$\,) = 1.26. Considering the extinction law for the LMC \citep{Howarth83},
we computed the reddening corrected intensity
$I$(H$\beta$ )\,=\,2.77\,$\times$\,10$^{-11}$ erg cm$^{-2}$ s$^{-1}$. \\
This flux corresponds to a Lyman continuum flux of 1.78\,$\times$\,10$^{49}$
photons s$^{-1}$ for the star, assuming that the H\,{\sc ii}\ region is ionization-bounded.
The exciting star needed to provide this flux should have an effective temperature of
$\sim$\,41000 K, corresponding to a spectral type about O5\,V,
for Galactic metallicity \citep[][]{Martins05}.
However, this flux is probably underestimated since the H\,{\sc ii}\ region is likely not
completely ionization-bounded. \\
Similarly, the total H$\beta$ flux obtained for N77 was
$F$(H$\beta$ )\,=\,2.03\,$\times$\,10$^{-12}$ erg cm$^{-2}$ s$^{-1}$.
Considering the extinction law for the LMC \citep{Howarth83}, the corrected flux was
$I$(H$\beta$ )\,=\,2.61\,$\times$\,10$^{-12}$ erg cm$^{-2}$ s$^{-1}$
with the reddening coefficient {\it c}(H$\beta$\,) = 0.11.
This flux value corresponds to a Lyman continuum flux of 2.4\,$\times$\,10$^{48}$
photons s$^{-1}$ for the star.
The exciting star needed is of spectral type about O8\,V,
for Galactic metallicity \citep[][]{Martins05}.
This may be a lower limit, however, because of photon loss in a density-bounded H\,{\sc ii}\ region.\\
Several of the derived physical parameters of the compact H\,{\sc ii}\ regions are summarized in
Table\,\ref{tab:param}. The mean angular radius of the H\,{\sc ii}\ region, corresponding to
the FWHM of cross-cuts through the H$\alpha$\ image, is given in Col. 2. The corresponding
physical radius, obtained using distance moduli of {\it m\,-\,M} = 18.53 mag for LMC N191
and {\it m\,-\,M} = 18.94 mag for SMC N77 \citep[][]{Laney94} is presented
in Col. 3. The reddening coefficient, derived from the mean H$\alpha$\,/\,H$\beta$\ ratio,
is listed in Col. 4. It corresponds to the whole H\,{\sc ii}\ region. It differs from the value
found from the nebular spectrum (Table\,\ref{tab:flux})
because the spectrum samples only a particular position and therefore
does not cover the whole region.
The de-reddened H$\beta$\ flux obtained from the reddening coefficient is given in Col. 5
and the corresponding H$\beta$\ luminosity in Col. 6. The electron temperature is given in Col. 7.
For N77A the electron temperature is calculated from the forbidden-line ratio
[O\,{\sc iii}]\,$\lambda$ $\lambda$\,4363/(4959 + 5007), with an uncertainty of 4\%. For N191A,
the [O\,{\sc iii}]\,$\lambda$\,4363 is not observed in our spectra
so we used the electron temperature calculated from the forbidden-line ratio
[O\,{\sc ii}]\,$\lambda$ $\lambda$\,(3726 + 3729)/(7323 + 7330), with an uncertainty of 10\%, higher than the
estimate from the [O\,{\sc iii}]\ ratio.
The electron density, estimated from the ratio of the [S\,{\sc ii}]\ doublet $\lambda$ $\lambda$\,6717/6731,
is presented in Col. 8. It is accurate to $\sim$\,80\%.
It is well-known that the [S\,{\sc ii}]\ lines characterize the low-density
peripheral zones of H\,{\sc ii}\ regions.
Column 9 gives the rms electron density, {\it $<$n$_{e}$$>$},
calculated from the H$\beta$\ flux, the radius, and
the electron temperature, {\it T$_{e}$}, assuming that the H\,{\sc ii}\ region is
an ionization-bounded Str\"omgren sphere. Furthermore,
the total mass of the ionized gas, calculated from the {\it $<$n$_{e}$$>$}
with the previously noted Str\"omgren sphere assumption is presented in Col. 10. The ionization
is produced by the Lyman continuum photon flux given in Col. 11.
\begin{table*}
\caption{Some physical parameters of the compact H\,{\sc ii}\ regions SMC N77 and LMC N191}
\label{tab:param}
\begin{tabular}{l c c c c c c c c c c}
\hline\hline
Object & $\theta$ & $r$ & $c$(H$\beta$ ) & $I$(H$\beta$ ) & $L$(H$\beta$ ) & {\it Te} & {\it Ne}$^{\dag}$ & {\it $<$n$_{e}$$>$}
& {\it M$_{gas}$} & $N_{L}$ \\
& ($''$\hspace*{-.1cm}\,) & (pc) & & erg s$^{-1}$ cm$^{-2}$ & erg s$^{-1}$ & (K) & cm$^{-3}$ & cm$^{-3}$ & ($M_{\odot}$ ) & ph s$^{-1}$ \\
& & & & $\times$\,10$^{-12}$ & $\times$\,10$^{36}$ & & & & & $\times$\,10$^{48}$ \\
\hline
LMC N191A & 5.2 & 1.2 & 1.26 & 27.7 & 8.3 & 10800 & 440 & 600 & 140 & 17.8 \\
SMC N77A & 10 & 2.9 & 0.11 & 2.6 & 1.1 & 14240 & 40 & 60 & 200 & 2.4 \\
\hline
\end{tabular} \\
$\dag$ Estimated from the [S\,{\sc ii}]\ ratio.
\end{table*}
\subsection{Stellar content}
\subsubsection{LMC N191}
The images show some 15 relatively bright stars lying within 10$''$\hspace*{-.1cm}\, of N191A.
The brightest component of this group, star \#1, has $V$ = 14.46 mag and it is
followed by stars \#6 and \#4 with $V$ = 16.28 and 16.60 mag, respectively.
Assuming an intrinsic color of {\it B\,--\,V}\,=\,-0.28 mag for O-type stars
\citep[][]{Martins06} and a distance modulus of 18.53 mag, the absolute magnitude
of star \#1 is M$_{V}$ = -5.27 mag. Following the calibration of \citet[][]{Martins05} for
Galactic stars, if the star is on the main sequence, it would be of a spectral type O5\,V with
a mass of 40 $M_{\odot}$ . This agrees well with what was found based on the stellar
Lyman continuum derived from the H$\beta$\, emission of the H\,{\sc ii}\ region (see Sect. 3.3). \\
We obtained spectra of two stars of the N191 region in our program of stellar spectroscopy
(Sect. 2.2), in particular star \#1, for the first time.
However, in spite of the relatively good seeing conditions,
extracting uncontaminated spectra is not straightforward. The compact
H\,{\sc ii}\ region has strong emission lines, in particular those of He\,{\sc{i}} , that fill in the
absorption lines of the embedded stars. Nevertheless, we classified \#N191-1 as O8.5\,V with
a good accuracy (Fig.\,\ref{fig:spectres_n191}). The spectral classification was performed
using the criteria stated by \citet[][]{Walborn90}.
This is cooler than the indirect, more uncertain estimates
(absolute magnitude and stellar Lyman continuum). However, we cannot exclude the possibility that
the spectrum of N191A-1 is contaminated by unresolved close companions of later type than
star \#1. See Sect. 4 for discussion. \\
The spectrum of star \#N191-8, the brightest object of the field, with $V$ = 13.79 mag,
lying $\sim$\,30$''$\hspace*{-.1cm}\ west of star \#1, outside N191A, is also presented in
Fig.\,\ref{fig:spectres_n191}.
Using the same classification criteria \citep[][]{Walborn90}, we can assign spectral
type B0 to this star. The spectrum does not allow us
to determine the luminosity class firmly
because the B-type luminosity criteria are not sufficiently apparent here.
Taking into account an extinction of $A_{V}$ = 1.2 mag, the absolute magnitude of this
star is M$_{V}$ = -5.98 mag. This determination points to a supergiant
Ib \citep{Fitzpatrick90}. Therefore star \#N191-8 likely provides a part of the ionizing
photons that power the compact H\,{\sc ii}\ region. See below for more details about
the evolutionary stage of this star. \\
The color-magnitude diagram of the star population
in the entire NTT field of 319$''$\hspace*{-.1cm}\,\,$\times$\,327$''$\hspace*{-.1cm}\ ($\sim$\,78\,$\times$\,79 pc)
for a cut-off magnitude of $V$ = 21 is presented in Fig. \ref{fig:cmd_n191}.
Three isochrones with ages 3 Myr, 8 Myr, and 1 Gyr, for a metallicity of
Z = 0.008 \citep{Lejeune01}, are also overplotted.
The diagram displays two principal groups: an apparent main sequence centered on
$B-V$\,$\sim$\,0.1 mag and an evolved population centered on $B-V$\,$\sim$\,1 mag. \\
Star \#1, indicated by a cross, is affected by an extinction of A$_{V}$ = 1.2 mag;
therefore, it appears reddened compared with the 3 Myr isochrone.
Accordingly, star \#1 appears to be the most
massive young star of the field. The stars lying across the H\,{\sc ii}\ region N191 are concentrated
along the main sequence. They are intermediate-mass stars of $\sim$\ 10-15 $M_{\odot}$\ assuming
that they are on the 3 Myr isochrone. \\
Star \#8, which we described above, is apparently
related to the 8 Myr isochrone. This means that star \#8 is probably older than
the exciting star of the compact H\,{\sc ii}\ region. The fact that no noticeable
surrounding ionized region is associated with star \#8 is compatible with this deduction.
An initial mass of 22 $M_{\odot}$\ can be derived for this evolved B-type star. \\
The second group of stars in the diagram consists of evolved stars with colors between {\it B\,--\,V}\,=\,0.8
and 1.6 mag. They have a possible age ranging from 1 Gyr to 10 Gyr. It is very likely
that this latter population is not physically associated with N191.
\begin{figure*}[]
\centering
\includegraphics[width=1.0\hsize]{19706fg5.eps}
\caption{Color-magnitude {\it V} versus {\it B -- V} diagram for
stars observed toward LMC N191.
Three isochrones are shown, 3 Myr ($A_V = 0.9$ mag,
green dashed curve), 8 Myr ($A_V = 1.2$ mag, violet dotted curve) and
1 Gyr ($A_V = 0.3$ mag, blue thick dashed curve), computed for a
metallicity of Z = 0.008 \citep{Lejeune01} and a distance modulus of 18.53 mag.
The red cross indicates the location of the main exciting star of N191A.
The numbers refer to the stars listed in Table\,\ref{tab:stars_n191}.
}
\label{fig:cmd_n191}
\end{figure*}
\subsubsection{SMC N77A}
The stellar environment of N77 is totally different from that of N191.
The images reveal only two stars embedded in the blob.
These two stars have similar visual magnitudes ($V$ = 17.35 and 17.57 mag), and are
fainter than the main N191 stars.
Assuming that O-type stars have an intrinsic color of {\it B\,--\,V}\,=\,-0.28 mag
\citep[][]{Martins06} (A$_{V}$ = 2.1 mag) and a distance modulus of 18.94 mag,
the absolute magnitude of star \#1 is M$_{V}$ = -3.69 mag. It is too faint to be an
O star \citep[][]{Martins05} unless the extinction is underestimated. \\
Fig.\,\ref{fig:spectres_n77} displays the spectrum of star \#1, or more precisely the
spectrum of stars \#1 and \#2. Indeed, the two stars are too close to obtain separate spectra.
Nevertheless, the He\,{{\sc ii}}\ absorption line at $\lambda$\ 4686 is
certainly present, while in contrast He\,{{\sc ii}}\ $\lambda$\ 4541 is not observed.
These features indicate that N77-1 is an early-type B star. This agrees
with the estimate of the absolute magnitude. However, it is significantly colder than
the spectral type inferred from the H$\beta$\ flux measurement
(see Sect. 3.3 and Sect. 4).
\begin{table*}
\caption{Positions and photometry of the main stars in the fields of LMC N191 and SMC N77 $^{\dag}$}
\label{tab:stars_n191}
\begin{tabular}{l l c c c c c c c r c c}
\hline\hline
Galaxy & star ID & $\alpha$ (J2000) & $\delta$ (J2000) & $V$ & $B-V$ & $V-R$ & $J$ & $H$ & $K$
& spectral type & H\,{\sc ii}\ region \\
& & & & & & & & & & & component\\
\hline
LMC & N191-1 & 05:04:38.12 & -70:54:41.28 & 14.46 & 0.11 & -0.01 & 13.29 & 13.33 & 12.05 & O8.5 V & A \\
& N191-2 & 05:04:38.23 & -70:54:39.05 & 17.59 & 0.31 & 0.43 & & & & & A\\
& N191-3 & 05:04:37.72 & -70:54:40.50 & 17.01 & 0.30 & 0.19 & & & & & A\\
& N191-4 & 05:04:38.84 & -70:54:40.95 & 16.60 & 0.06 & 0.14 & & & & & A\\
& N191-5 & 05:04:38.86 & -70:54:42.11 & 17.06 & 0.06 & 0.17 & & & & & A\\
& N191-6 & 05:04:37.92 & -70:54:45.78 & 16.28 & 0.12 & 0.16 & 14.37 & 14.11 & 12.36 & & A\\
& N191-7 & 05:04:38.96 & -70:54:46.16 & 16.82 & 0.07 & -0.08 & & & & & A\\
& N191-8 & 05:04:31.71 & -70:54:41.06 & 13.79 & 0.14 & 0.04 & 13.16 & 13.06 & 12.91 & B0 & A\\
& N191-9 & 05:04:34.08 & -70:54:07.21 & 15.67 & 0.10 & -0.04 & 15.35 & 15.15 & 15.02 & & B \\
& N191-10 & 05:04:30.49 & -70:53:55.11 & 15.74 & 0.01 & -0.11 & 15.53 & 15.78 & 15.28 & & B\\
& N191-11 & 05:04:30.53 & -70:54:10.51 & 15.83 & 0.13 & 0.03 & 15.32 & 15.18 & 14.88 & & B\\
& N191-12 & 05:04:31.00 & -70:54:02.42 & 16.69 & 0.06 & -0.11 & & & & & B\\
& N191-13 & 05:04:39.85 & -70:54:19.00 & 17.02 & 0.38 & 0.38 & 15.00 & 14.45 & 13.71 & & \\
& N191-14 & 05:04:26.97 & -70:53:51.71 & & & & 16.34 & 14.01 & 12.44 & AGB star & \\
SMC & N77-1 & 01:02:48.98 & -71:53:16.58 & 17.35 & 0.40 & -0.23 & 14.75 & 14.51 & 14.03 & early B & A \\
& N77-2 & 01:02:49.04 & -71:53:17.69 & 17.57 & 0.30 & 0.23 & & & & & A \\
& N77-3 & 01:02:50.46 & -71:53:09.10 & 17.01 & 1.53 & 0.23 & 14.13 & 13.51 & 13.45 & G0 V (Galactic) & \\
& N77-4 & 01:02:44.12 & -71:53:30.30 & 17.06 & 0.29 & -0.38 & 16.38 & 16.02 & 14.98 & & B\\
\hline
\end{tabular} \\
$\dag$
The {\it BVR} photometry results from the NTT observations while the
{\it JHK} measures are taken from the 2MASS catalog. \\
\end{table*}
\begin{table*}
\caption{Positions and photometry of the YSO candidates in the fields of LMC N191 and SMC N77 $^{\dag}$}
\label{tab:phot_yso}
\begin{tabular}{l l l c c c c c c c c c c}
\hline\hline
Field & YSO ID & star ID & $\alpha$ (J2000) & $\delta$ (J2000) & $J$ & $H$ & $K$ & [3.6] & [4.5] & [5.8] & [8.0] & [24.0] \\
\hline
LMC N191 & YSO 1 & & 05:04:35.85 & -70:54:30.1 & 15.75 & 14.73 & 13.33 & 10.48 & 9.54 & 8.09 & 6.56 & 1.83\\
& YSO 2 & N191-13 & 05:04:39.85 & -70:54:19.0 & 15.00 & 14.45 & 13.71 & 11.28 & 10.57 & 8.68 & 6.95 & 0.85 \\
& YSO 3 & & 05:04:32.19 & -70:54:14.0 & & & & 13.32 & 12.73 & 10.95 & 9.34 & 4.84\\
SMC N77 & YSO 1 & & 01:02:48.54 & -71:53:18.0 & & & & 13.04 & 12.09 & 10.42 & 8.93 & 3.55\\
& YSO 2 & & 01:02:53.13 & -71:53:39.2 & 16.42 & 16.02 & 15.51 & 13.80 & 13.89 & 10.87 & 9.19 & 4.59\\
& YSO 3 & & 01:02:38.81 & -71:54:15.9 & & & & 14.89 & & 12.83 & 11.17 & 8.46 \\
\hline
\end{tabular} \\
$\dag$
The {\it JHK} measures are taken from the 2MASS catalog.
IRAC and MIPS photometry come from \citet{Bolatto07} for the SMC field and from \citet{Gruendl09} for the LMC field.
\end{table*}
\begin{figure*}[]
\centering
\includegraphics[width=14cm]{19706fg6.eps}
\caption{
Spectra of two stars observed toward LMC N191. Star \#1
is the exciting source of the compact H\,{\sc ii}\ region N191A. Note the
He\,{{\sc ii}}\ absorption lines indicating a hot massive star O8.5\,V.
Star \#N191-8, lying in the field of N191, is classified
as B0.}
\label{fig:spectres_n191}
\end{figure*}
\begin{figure*}[]
\centering
\includegraphics[width=14cm]{19706fg7.eps}
\caption{
Spectrograms of two stars observed toward SMC N77A. Star \#1, an early B-type star,
is the exciting source of the compact H\,{\sc ii}\ region N77A.
Star \#3 is classified as a Galactic star G0\,V.}
\label{fig:spectres_n77}
\end{figure*}
\begin{figure*}[]
\centering
\includegraphics[width=0.7\hsize]{19706fg8a.eps}
\includegraphics[width=0.7\hsize]{19706fg8b.eps}
\caption{
Mid-IR Spitzer photometry of two YSOs fitted using
YSO models \citep{Robitaille07}.
The filled circles represent the input fluxes. The black curve shows the best fit,
while the gray curves display subsequent good fits. The dashed curve shows the stellar
photosphere corresponding to the central source of the best-fitting model, as it
would look in the absence of circumstellar dust (but including interstellar extinction).
{\it Upper panel}: SED fit of N191-YSO1. The stellar mass according to these models
varies between 16 and 21 $M_{\odot}$ .
The best fit is for a 20 $M_{\odot}$\ protostar with a derived total luminosity of 2.9\,$\times$\,10$^{4}$ $L_{\odot}$ .
{\it Lower panel}: SED fit of N77-YSO1. The stellar mass according to these models varies
between 10 and 17 $M_{\odot}$ . Note that the
lack of $JHK$ photometry leads to a wider range of selected models.
The best fit is for a 10 $M_{\odot}$\ protostar with a derived total luminosity of
1.0\,$\times$\,10$^{4}$ $L_{\odot}$ .
}
\label{fig:yso}
\end{figure*}
\subsection{Chemical abundances}
Table\,\ref{tab:flux} lists the main lines of the nebular spectra of N191 and N77.
The ionic abundances with respect to H$^{+}$ were calculated from nebular lines
using the IRAF task ionic of the package NEBULAR \citep{Shaw95}. The abundance values are
listed in Table\,\ref{tab:ion}. \\
To derive the total abundances of a given element, it is necessary to estimate the amount of
the element in ionization states not observed in our spectra.
We therefore used a set of ionization-correction factors (ICFs) to convert into
elemental abundances.
The absence of the nebular He\,{{\sc ii}}\ line indicates that He$^{2+}$/H$^{+}$ is negligible.
Moreover, we assume that neutral helium is not important. Thus we
assumed that the total He/H ratio is just equal to He$^{+}$/H$^{+}$.
The total abundance of oxygen was adopted to be the sum of the O$^{+}$ and O$^{2+}$ abundances.
The absence of He\,{{\sc ii}}\ recombination lines in our spectra and the similarity between
the ionization potentials of He$^+$ and O$^{++}$ imply that the contribution of O$^{3+}$
is not significant.
To obtain the total abundance of nitrogen, we used the usual ICF
based on the similarity between the ionization potential of
N$^{+}$ and O$^{+}$ \citep{Peimbert69}. The N$^{+}$ abundance does not depend strongly
on the electron temperature. The largest errors come from the uncertainty in the
$\lambda$ $\lambda$\,6548 and 6584 line intensities. Our N result is accurate to within about
30\%. The only measurable lines in the optical range for Ne are those of
Ne$^{2+}$ but the amount of Ne$^{+}$ may be significant in the region.
We adopted the usual expression of the ionization correction factor of Ne
that assumes that the ionization structure of Ne is similar to that of O \citep{Peimbert69}. \\
The total chemical abundances for N191 and N77 are presented in Table\,\ref{tab:ab}.
The most accurately estimated abundances belong to He and O, which are accurate to within
15\% and 20\%, respectively. Table\,\ref{tab:ab} also presents the mean abundance
values derived for the SMC and the LMC \citep[][]{Russell92}. The N77 abundances agree well with
the SMC mean values.
For N191, the He abundance is quite low compared with the LMC mean value. The strong stellar He\,{\sc{i}}\ absorption lines
could have contaminated the nebular spectra of the H\,{\sc ii}\ region.
The abundances of O, N, and Ne are lower than the LMC mean values, but this can be explained by
uncertainties and also by the fact that N191 is more metal-poor than the LMC because of its
external position.
\begin{table*}
\caption{Nebular line intensities of the compact H\,{\sc ii}\ region LMC N191A and SMC N77A }
\label{tab:flux}
\begin{tabular}{llcccccc}
\hline \hline
\multicolumn{2}{c}{} & \multicolumn{3}{c}{LMC N191A} & \multicolumn{3}{c}{SMC N77A}\\
$\lambda$ (\AA) & Iden. & $F$($\lambda$\,)$^{\dag}$ & $I$($\lambda$\,)$^{\dag}$
& Accuracy$^{\ddag}$ & $F$($\lambda$\,)$^{\dag}$ & $I$($\lambda$\,)$^{\dag}$
& Accuracy$^{\ddag}$\\
\hline
3727,29 & [O\,{\sc ii}]\ & 279.8 & 344.0 & A & 189.8 & 229.4 & A\\
3797 & H10 & 2.7 & 3.3 & B & 3.9 & 4.6 & B\\
3835 & H9 & 3.6 & 4.3 & B & 5.6 & 6.6 & B\\
3869 & [Ne~{\sc{iii}}]\ & 2.1 & 2.5 & C & 12.7 & 14.9 & A\\
3889,90 & He\,{\sc{i}}\ + H8 & 9.9 & 11.7 & A & 13.6 & 15.9 & A\\
3968,70 & [Ne~{\sc{iii}}]\ + H$\epsilon$\ & 9.7 & 11.3 & A & 14.5 & 16.7 & A\\
4071 & [S\,{\sc ii}]\ & 1.2 & 1.4 & D & & \\
4101 & H$\delta$\ & 39.1 & 44.3 & A & 19.0 & 21.3 & A\\
4144 & He\,{\sc{i}}\ & 1.3 & 1.5 & D & & \\
4340 & H$\gamma$\ & 34.6 & 37.5 & A & 40.3 & 43.4 & A\\
4363 & [O\,{\sc iii}]\ & & & & 4.8 & 5.1 & B\\
4471 & He\,{\sc{i}}\ & & & & 4.3 & 4.5 & C\\
4861 & H$\beta$\ & 100.0 & 100.0 & A & 100 & 100 & A\\
4959 & [O\,{\sc iii}]\ & 49.7 & 49.1 & A & 104.6 & 103.4 & A\\
5007 & [O\,{\sc iii}]\ & 147.4 & 144.6 & A & 300.2 & 295.0 & A\\
5577 & [O\,{\sc i}]\ & 20.2 & 18.6 & A & 45.2 & 41.9 & A\\
5876 & He\,{\sc{i}}\ & 10.5 & 9.4 & B & 11.1 & 10.0 & B\\
6300 & [O\,{\sc i}]\ & 9.8 & 8.5 & C & 73.8 & 64.5 & A \\
6312 & [S\,{\sc iii}]\ & 1.2 & 1.0 & C & 2.0 & 1.7 & C \\
6363 & [O\,{\sc i}]\ & 3.1 & 2.7 & D & 23.4 & 20.4 & A\\
6548 & [N\,{\sc{ii}}]\ & 13.2 & 11.2 & C & 10.1 & 8.7 & C\\
6563 & H$\alpha$\ & 340.0 & 286.0 & A & 335.7 & 286 & A\\
6584 & [N\,{\sc{ii}}]\ & 37.9 & 32.0 & B & 11.6 & 9.9 & B\\
6678 & He\,{\sc{i}}\ & 2.7 & 2.3 & B & 3.2 & 2.7 & B\\
6716 & [S\,{\sc ii}]\ & 17.2 & 14.4 & B & 15.8 & 13.4 & B\\
6731 & [S\,{\sc ii}]\ & 15.9 & 13.3 & B & 11.6 & 9.9 & B\\
7065 & He\,{\sc{i}}\ & 2.4 & 2.0 & C & 2.8 & 2.3 & C \\
7135 & [Ar\,{\sc{iii}}] & 8.7 & 7.1 & B & 9.2 & 7.6 & B \\
7236 & [Ar\,{\sc{iv}}]\ & 1.9 & 1.5 & D & 14.2 & 11.7 & B\\
7323 & [O\,{\sc ii}]\ & 12.9 & 10.3 & C & 22.0 & 18.0 & C \\
7751 & [Ar\,{\sc{iii}}]\ & 3.9 & 3.0 & C & 16.2 & 12.9 & B\\
\hline
c(H$\beta$) = & & 0.24 & & & 0.22 & & \\
\hline
\end{tabular}
$\dag$ $F(\lambda)$ and $I(\lambda)$ represent
observed and de-reddened line intensities relative to H$\beta$ . \\
$\ddag$ The capital letters represent the following uncertainties:
A $<$\,10\%, B=10--20\%, C=20--30\%, and D$>$\,30\%.
\end{table*}
\begin{table*}
\caption{Nebular ionic abundances}
\label{tab:ion}
\begin{tabular}{lcc}
\hline\hline
Ion & LMC N191 & SMC N77 \\
\hline
He$^+$/H$^+$ & 0.070 & 0.079 \\
O$^+$/H$^+$ ($\times$\,$10^{5}$) & 14.9 & 6.76 \\
O$^{++}$/H$^+$ ($\times$\,$10^{5}$) & 5.10 & 3.63 \\
N$^+$/H$^+$ ($\times$\,$10^{6}$) & 6.4 & 2.5 \\
Ne$^{++}$/H$^+$ ($\times$\,$10^{6}$) & 2.77 & 4.75 \\
S$^+$/H$^+$ ($\times$\,$10^{7}$) & 6.86 & 5.33 \\
Ar$^{++}$/H$^+$ ($\times$\,$10^{7}$) & 6.54 & 3.36 \\
\hline
\end{tabular}
\end{table*}
\vspace{0.5cm}
\begin{table*}
\caption{Elemental abundances $^{\dag}$}
\label{tab:ab}
\begin{tabular}{lcccc}
\hline\hline
Element & SMC N77 & mean SMC$^{\ddag}$ & LMC N191 & mean LMC$^{\ddag}$ \\
\hline
He/H & 0.079 & 0.081 & 0.070 & 0.089\\
O/H ($\times$\,$10^{4}$) & 1.04 & 1.07 & 2.0 & 2.24\\
N/H ($\times$\,$10^{6}$) & 3.85 & 4.27 & 8.58 & 13.8\\
Ne/H ($\times$\,$10^{5}$) & 1.34 & 1.86 & 1.09 & 4.07\\
\hline
\end{tabular}
$\dag$ See Sect. 3.5 for uncertainties \\
$\ddag$ \citet[][]{Russell92}
\end{table*}
\section{Discussion}
The two H\,{\sc ii}\ regions studied in this paper, LMC N191A and SMC N77A, belong to
the class of compact H\,{\sc ii}\ regions, which are regions of newly
formed massive stars in the Magellanic Clouds.
Their sizes ($\sim$\, 5 to 10$''$\hspace*{-.1cm}\ ) are much smaller than those of typical H\,{\sc ii}\
regions in the Magellanic Clouds (several arc minutes). They are also associated with a
much smaller number of exciting stars. With an [O\,{\sc iii}]\,($\lambda$ $\lambda$\,4959\,+\,5007)\,/\,H$\beta$\ ratio
of $\sim$\,2 and an H$\beta$\ luminosity of 8.3\,$\times$\,10$^{36}$ erg s$^{-1}$,
LMC N191A is a low-excitation blob (LEB), as defined by
\citet[][]{Meynadier07}.
At the same H$\beta$\ luminosity, LEBs have lower excitation than HEBs and are powered by less
massive exciting stars.
In contrast, SMC N77A conforms more to the
defining criteria of high-excitation blobs (HEBs), because it has an
[O\,{\sc iii}] /H$\beta$\ ratio of $\sim$\,4 and an H$\beta$\ luminosity of 1.1\,$\times$\,10$^{36}$ erg s$^{-1}$.
Nevertheless, with a diameter of $\sim$\,20$''$\hspace*{-.1cm} , N77A is more extended than a typical HEB.
Compared with N191A, N77A is more than a factor of two larger,
about 40\% more massive, but less dense and less extincted (Table 1). \\
N191 is situated outside of the main body of
the LMC, at a large distance (3 kpc) from the major
star-forming region 30 Dor. Among the southernmost H\,{\sc ii}\ regions of the LMC, only
N214 \citep[][and references therein]{Meynadier05}
and N206 \citep[][and references therein]{Romita10}
have been investigated in detail. In comparison with these two complexes,
N191 is a smaller region with a fainter emission nebula.
It is also linked to an OB association, LH 23 \citep[][]{Lucke70}, and a giant
molecular cloud, 54\,$\times$\,14 pc in size with a CO mass of 2\,$\times$ 10$^{5}$ $M_{\odot}$\ \citep[][]{Fukui08}.
N77 is the northernmost H\,{\sc ii}\ region of the SMC so far studied in detail.
It lies some 440 pc north of N66, the main star-forming region in the SMC
\citep[][and references therein]{MHM10a}. N77 is associated with a small
molecular cloud \citep[][]{Mizuno01} and a small OB association, B-OB 24
\citep[][]{Battinelli91}. \\
An accurate characterization of the exciting source of each of these two compact
H\,{\sc ii}\ regions requires a more detailed investigation. There is indeed a discrepancy between
the spectral type indicated by the spectra and that derived from H$\beta$\ flux estimates.
The spectrum obtained toward star \#1 in LMC N191A belongs to an O8.5 V type (Sect. 3.4).
In contrast, the H$\beta$\ flux indicates an earlier O5 V type (Sect. 3.3).
The same is true for SMC N77A. The spectral classification indicates an early B type star
(Sect. 3.4), whereas the H$\beta$\ luminosity suggests an O8 V type at least (Sect. 3.3).
This discrepancy can be accounted for
by the presence of hotter stars embedded in the H\,{\sc ii}\ regions.
This assumption is in line with the indication of the ($V, B-V$) color-magnitude diagram
(Fig.\,\ref{fig:cmd_n191}), which suggests a 40 $M_{\odot}$\ star,
3 Myr old, as the main exciting source of N191A. For N77A, we detected
the nebular [O\,{\sc iii}]\ line $\lambda$\,4363, which needs a much earlier exciting star than
a B type. We note, however, that this line is not reported in the paper by
\citet[][]{Russell90}. Still, high-resolution observations in near-IR are
necessary to check the possibility of embedded sources. Another explanation
is that the spectral type is underestimated because of
contamination from nebular emission
lines. However, justifying a three-subtype uncertainty seems difficult. \\
The difference between the excitation degrees of LMC N191A and SMC N77A, as mentioned
above, can be commented upon from another viewpoint:
the [O\,{\sc iii}] /H$\beta$\ ratio is higher in N77A ($\sim$\,4) compared to that
in N191 ($\sim$\,2), even if the latter is denser. According to models for homogeneous H\,{\sc ii}\ regions
\citep[e.g.,][]{Stasinska90} the [O\,{\sc iii}] /H$\beta$\ ratio is proportional to the electron
density for a given exciting source. This means that we expect a lower ratio
for N77A if the exciting sources have comparable effective temperatures. The higher
[O\,{\sc iii}] /H$\beta$\ ratio in N77A would suggest a hotter star than in N191A.
Alternatively, the weak ratio of N191A may be due to the density structure
of N191A. The aforementioned models predict that a density rise in the
outer zones of an H\,{\sc ii}\ region results in a decrease of the global [O\,{\sc iii}] /H$\beta$\ ratio.
Otherwise the higher [O\,{\sc iii}] /H$\beta$\
ratio of N77A may reflect the difference of metallicity between the LMC and SMC.
In low-metallicity environments, the inefficiency of
cooling raises the electron temperature so that forbidden oxygen lines
become stronger despite the lower abundance. These assumptions may explain
the apparent discrepancy
between the higher [O\,{\sc iii}] /H$\beta$\ ratio of N77A and the cooler ionizing star inferred from
our study.\\
Three young stellar object (YSO) candidates detected by
\citet{Gruendl09} lie toward N191A. Similarly, there are three such candidates
detected in the N77 field \citep{Bolatto07}. They are indicated in
Figs.\,\ref{fig:n191} and \ref{fig:n77} (lower panels) and
Figs.\,\ref{fig:n191_spitzer} and \ref{fig:n77_spitzer}, respectively.
Their coordinates are listed in Table\,\ref{tab:phot_yso}.
The N191 candidates lack an optical counterpart except for object \#13. The closest
candidate to N191A, named 050435.85-705430.1 (or N191-YSO1 in the present paper),
lies about 15$''$\hspace*{-.1cm}\ (3.6 pc) northeast of the exciting star \#1. The {\it JHK} and Spitzer
IRAC and MIPS colors of these objects are listed in Table 3.
Two of the N77 candidates (YSO2 and YSO3) seem very close to two
relatively bright field stars, which raises the question of their association.
However, astrometrically speaking, these stars and the YSO candidates are not
associated. It seems that these YSO candidates have very faint optical
counterparts below our detection limits.\\
A comprehensive search for YSOs in the LMC has also been carried out by the SAGE team
and was reported by \citet{Whitney08}.
They have found only one YSO in the vicinity of N191A that corresponds to the closest
candidate of \citet{Gruendl09}.
This difference is due to the different selection criteria, based on IRAC colors,
used in different works.
\citet{Gruendl09} argue that there are no simple criteria in color-magnitude
space that can unambiguously separate the YSOs
from AGB/post-AGB stars, planetary nebulae, and background galaxies.
Moreover, the point source definition differs in the two approaches.
\citet{Gruendl09} include slightly extended sources that are likely
YSOs superimposed on a bright background. In contrast, because the SAGE definition
is more constraining, it excludes such cases. \\
To be more specific, here we applied the selection diagrams used by
\cite{Simon07} to look into the nature of the YSO candidates. These authors
used color-color diagrams [3.6]-[4.5] versus [5.8]-[8.0] and
[3.6]-[4.5] versus [4.5]-[8.0] to characterize
the candidate YSOs they have detected toward the SMC H\,{\sc ii}\ region NGC 346 (N66).
We note that on the [3.6]-[4.5] versus [5.8]-[8.0] plot two of the YSO
candidates have colors near to those of YSOs. These are N191 YSO3 and N77 YSO1. The
other candidates show colors of ``probable YSOs'' or ``poor fits/PAHs''.
Note that the [3.6]-[4.5] versus [5.8]-[8.0] plot does not clearly separate YSOs
from other types of sources,
particularly stars with modest IR excesses and sources with PAH contamination
\citep{Simon07}. We also used
the [3.6]-[4.5] versus [4.5]-[8.0] plot, which takes advantage of longer color baselines and the
abrupt change in YSO spectra between the 4.5 and 5.8 $\mu$m bands, to distinguish YSOs
from stars, galaxies, and PAH. However, on this plot all YSO candidates are offset
with respect to the expected YSO positions, since they are redder, i.e.
with higher [4.5]-[8.0] color values, compared to YSOs. \\
A spectral energy distribution (SED) analysis provides a more efficient method
for investigating the nature of YSOs. We used the Spitzer photometry to construct the
mid-IR SEDs of our YSO candidates. We fitted these SEDs with the library of YSO models
by \cite{Robitaille06} using the online SED fitting tool provided by these authors
\citep{Robitaille07}\footnote{Available at
http://caravan.astro.wisc.edu/protostars}.
The SED plots of the brightest YSO candidates toward N191 and N77 are displayed in
Fig.\,\ref{fig:yso}. N191-YSO1 is best fitted by models of 16 to 21 $M_{\odot}$ .
The best fit suggests a 20 $M_{\odot}$\ protostar with a total luminosity
of 2.9\,$\times$\,10$^{4}$ $L_{\odot}$ .
With regard to N77-YSO1, the best models belong to masses ranging from
10 to 17 $M_{\odot}$ . The best fit is for a 10 $M_{\odot}$\ protostar with a total luminosity of
1.0\,$\times$\,10$^{4}$ $L_{\odot}$ . The majority of the best-fit YSO models include both
circumstellar envelopes and disks.
Moreover, in these models the accretion rate from the envelope onto the
YSO indicates the evolutionary stage of the protostar \citep{Robitaille06}.
Based on the high accretion rates, 1\,$\times$\,10$^{-4}$ and 5\,$\times$\,10$^{-5}$ $M_{\odot}$\ yr$^{-1}$ for
N191-YSO1 and N77-YSO1, respectively,
both objects can be classified as Stage I sources \citep{Robitaille06}.
This classification is equivalent to the traditional Class I source.
It should, however, be cautioned that these models are based on low-mass star formation
scenarios, whereas we do not know how massive stars actually form. Therefore,
massive YSOs may contrast in their properties with commonly studied low-mass YSOs, in particular
in low-metallicity environments such as the Magellanic Clouds.
However, the use of these models must be considered as a first approach and preliminary
screening of the problem. \\
The two brightest YSO candidates are also the most closely adjacent objects to their respective
H\,{\sc ii}\ regions. More specifically, N191 YSO1 lies 15$''$\hspace*{-.1cm}\ northwest of star \#1, while
N77 YSO1 is seen toward the central dust lane of the compact H\,{\sc ii}\ region, very
close to the exciting star. Since N191 and N77 are young active H\,{\sc ii}\ regions, these
two YSO candidates may effectively be associated with them.
The presence of these YSO candidates confirms that star formation activity is still
ongoing in N191A and N77A. More specifically, massive protostars of $\sim$\, 10 and
20 $M_{\odot}$\ are in the process of formation.
The YSOs may have been triggered by the ionization front progression in the associated
molecular clouds. However, high-resolution submillimeter observations, such as
those of ALMA, are required to check these first results. \\
\section{Concluding remarks}
This paper presented the first detailed study of LMC N191A and SMC N77A using
imaging and spectroscopy in the optical obtained at the ESO NTT as well as Spitzer
and 2MASS data archives. The two objects are among the outermost star-forming regions of the
Magellanic Clouds. We derived several physical characteristics of these regions and their
powering sources. The compact H\,{\sc ii}\ region N191A, $\sim$\,10$''$\hspace*{-.1cm}\ (2.4 pc) in diameter,
belongs to a small class of ``low-excitation blobs''
in the Magellanic Clouds. In contrast, SMC N77A, $\sim$\,20$''$\hspace*{-.1cm}\ (5.8 pc) in size,
belongs to the ``high-excitation blob'' family.
The class of compact H\,{\sc ii}\ regions in the Magellanic Clouds is not very populated. Therefore new
members provide additional data for improving our knowledge of their
characteristics and their formation processes.
Higher resolution observations are necessary to deepen the study of these objects.
|
{
"timestamp": "2012-06-27T02:03:20",
"yymm": "1206",
"arxiv_id": "1206.5939",
"language": "en",
"url": "https://arxiv.org/abs/1206.5939"
}
|
\section{Introduction}
\label{section: intro}
This paper deals with the existence of bounded traveling fronts for the
reaction-diffusion equation
\begin{equation}
\label{main}
\frac{\partial u}{\partial t} - \Delta u =h(y,u) \qquad t\in \mathbb{R}, \; x=(x_1,y)\in \mathbb{R}^N.
\end{equation}
The function $h$ will be of three different forms in this paper. The first two concern non-linear terms $h(y,u)=f(u) - \alpha g(y) u$ where $f:\mathbb{R} \rightarrow \mathbb{R}$ is $\mathrm{C}^1$, and either of KPP type, or of bistable type and $g:\mathbb{R}^{N-1} \rightarrow \mathbb{R}_+$ is $\mathrm{C}^0$, $g(0)=0$ and $g\xrightarrow{|y|\rightarrow +\infty} +\infty$.
The existence of traveling fronts depends on the value of $\alpha>0$. The third case we consider here is when $h(y,u)=f(u)$ for $|y|\leq L_1$ and $ h(y,u)\leq -mu$ for $|y|\geq L_2$ where $0<L_1\leq L_2 <\infty$ are given parameters and $f$ is of bistable form and $h(y,u)+mu \rightarrow 0$ for $|y|\rightarrow +\infty$. We study the existence of traveling fronts depending on the values of $L_1$ and $L_2$.\\
The problems we study in this paper bear some similarities with the question of traveling fronts in cylinders of \cite{BN91}. However there are important differences that have to do with the fact that the cross section in \cite{BN91} was bounded and only the Neumann condition was considered there, whereas here the problem is posed in the whole space and the solution vanishes at infinity in directions orthogonal to the direction of propagation. We follow the same general scheme as in \cite{BN91} and in particular the sliding method. But some new ideas are also required. In particular, first, we treat directly the KPP case without the approximation of the KPP non-linearity by a combustion non-linearity as in \cite{BN91}. Then in the approach of Berestycki--Nirenberg \cite{BN91} to traveling fronts in cylinders for the bistable case, a useful result of H. Matano \cite{HM79} was involved in the proof. Here, we rely on stability ideas but also use energy minimization properties to bound the speed of the solution in the finite domain approximation. In particular, we do not use the precise exponential behavior that was used in \cite{BN91}.
Actually the developments of this method that we present in this paper can be used to somewhat simplify parts of \cite{BN91}. They can also be applied to traveling fronts in cylinder with Robin or Dirichlet boundary conditions. \footnote{The construction of traveling fronts for Neumann and Dirichlet conditions in cylinders given by \cite{Vega93} appears to be incomplete. Indeed, the continuity of the function $\phi$ on page 515 is not established before using Dini's Theorem to derive Lemma 3.2 there.}\\
Equation \eqref{main} in the first case comes from a model in population dynamics \cite{DFP03} that we briefly describe now. Let $u(t,x,v)$ represent the density of individuals at time $t$ and position $x$ that possess some given quantitative genetic trait represented by a continuous variable $v\in \mathbb{R}$.
The latter could be for example the size of wings or the height of an individual. We assume that individuals follow a Brownian motion (i.e. they diffuse) in space with a constant diffusion coefficient $\nu$, reproduce identically and disappear with a growth rate $k(x,v)$ that depends on the position $x$ and on the trait $v$. Furthermore, they also reproduce with mutation that is represented by a kernel $K(x,v,w)$ and disappear due to competition with a constant $L>0$. Thus, one is led to the following equation for $u$:
\begin{equation}
\label{DP1}
\begin{array}{l}
\partial_t u(t,x,v) - \nu \Delta_x u(t,x,v) =
k(x,v)\, u(t,x,v) \\ \qquad \qquad \qquad
+ \int_{w} K(x,v,w) \,u(t,x,w)\,dw
- u(t,x,v)\, \int_{w}
L u(t,x,w)\, dw .
\end{array}
\end{equation}
We assume moreover that there exists a most adapted trait $\phi=\phi(x)$ that may depend on the location $x$. The farther the trait of an individual is from the most adapted trait, the larger the probability of dying and not reproducing. Thus the growth rate can be written for example as $k(x,v) = a - b\,|v - \phi(x)|^2$ with $a$ and $b>0$. Non-local reaction-diffusion equations of this type are quite difficult to handle from a mathematical standpoint as shown in \cite{BNPR09} where behaviors very different from those in local equations are brought to light. In a forthcoming numerical study \cite{avenir_num}, we show that depending on the value of $\alpha$, the traveling fronts may not be monotone anymore. This fact was also established in a different context for some related types of non-local equations in \cite{BNPR09}. This leads us to introduce a simplified version of this model that emphasizes propagation guided by the environment. First, we assume that mutations are due to a diffusion process represented by a Brownian motion in the space of trait $v$. Furthermore, we assume that $\phi$ is linear. Then a rotation in the variables $(x_1,y)$ allows one to reduce the problem to the case where the most adapted trait is $y=0$. Therefore we assume $\phi(x)=0$ and \eqref{DP1} can be rewritten as
\begin{equation}
\label{DP2}
\partial_t u(t,x,v) - \nu \Delta_{x,v} u(t,x,v) = (a - b|v|^2)u(t,x,v)
- u(t,x,v)\, \int_{w} L u(t,x,w)\, dw .
\end{equation}
Lastly we assume that competition is only between individuals sharing the same trait which leads us to equation
\begin{equation}
\label{DP3}
\partial_t u - \nu \Delta_{x,v}u = (a -Lu)u - b|v|^2u.
\end{equation}
Equation \eqref{main} is a generalization of this equation. In \cite{DFP03}, the authors observe numerically a generalized transition front spreading along the graph of $\phi$ for equation \eqref{DP1} (see \cite{BH06, BH07, BH12, HM12} or \cite{Shen} for the definition of generalized transition fronts). Here we want to prove theoretically (i) that there exists such a front for equation \eqref{main} at least for some values of the parameter $\alpha>0$ and (ii) that extinction occurs if $\alpha$ is too large. The latter condition can be interpreted as saying that the ``area'' of adapted traits is too thin compared to the diffusion. To remain consistent with the biological motivation, we only consider here non-negative and bounded solutions of \eqref{main}.\\
Other types of models related to this one have been proposed in the literature. For example, the model developed by Kirkpatrick and Barton in 1997 \cite{KB97} also studies the evolution of a population and of its mean trait. The main difference is that they have a system in $u$ and $v$ where $u$ represents the population and $v$, the mean trait, is described by a specific equation. This model has been further explored many times since \cite{FHB08, HBFF11}. It is worth noting that these models use the same type of non-linearity for the adaptation to the environment and also model the mutation with the Laplace operator rather than with integral operators.\\
This type of reaction-diffusion process in heterogeneous media also arises in many contexts in medicine. An important class of such models was treated in \cite{C07, PMHC09}. They deal with the propagation of a {\em cortical spreading depression\/} (CSD) in the human brain. These CSD's are transient depolarizations of the brain that slowly propagate in the cortex of several animal species after a stroke, a head injury, or seizures \cite{somjen}. They also are suspected of being responsible for the aura in \textit{migraines with aura}. CSD's are the subject of intensive research in biology since experiments blocking them during strokes in rodents have produced very promising results \cite{DeKeyser99, Nedergaard95}. These observations however have not been confirmed in humans and the existence of CSD's in the human brain is still a matter of debate \cite{Mayevsky96, Gorgi01, Back00, Strong02}. Since very few experiments and measurements on the human brain are available, be it for ethical or for technical reasons, mathematical models of CSDs help in understanding their existence and the conditions for their propagation. In such a problem, the morphology of the brain, and thus the geometry of the domain where CSD's propagate, is believed to play an important role. \\
The brain is composed of gray matter, where the neurons' somata are located, and of white matter, where only axons are to be found. The rodent brain (on which most of the biological experiments are done) is rather smooth and composed almost entirely of gray matter. In contrast, the human brain is very tortuous. The gray matter is a thin layer at the periphery of the brain with large thickness variations and convolutions, the rest of the brain being composed of white matter. According to mathematical models of CSDs \cite{Cetal08, somjen, shapiro01, tuckwell80}, the depolarization amplitude follows a reaction-diffusion process of bistable type in the gray matter of the brain while it diffuses and is absorbed in the white matter of the brain. The modeling of CSD hence leads one to the study of equations of the following type:
\begin{equation}
\label{SD}
\frac{\partial u}{\partial t}-\Delta u =f(u) \mathbf{1}_{|y|<L}-\alpha u\mathbf{1}_{|y|\geq L} \qquad t\in \mathbb{R}, \; x=(x_1,y)\in \mathbb{R}^N.
\end{equation}
Here, $f$ is of bistable type and $|y|=L$ corresponds to the transition from gray matter to white matter. In \cite{C07}, this equation was studied to prove that the thinness of the human gray matter ($L$ small) may prevent the creation or the propagation of CSDs on large distances. It was proved by studying the energy in a traveling referential of the solution of \eqref{SD} with a specific initial condition. The special case of \eqref{SD} for $N=2$ was described more completely in \cite{CJ11}. In \cite{PMHC09}, a numerical study shows that the convolutions of the brain have also a strong influence on the propagation of CSD. Finally, in \cite{CG05}, the effect of rapid variations of thickness of the gray matter was studied. \\
Finally, let us note that the same kind of equation arises in the modeling of tumor cords but with a slightly more complicated KPP non-linearity. We plan to investigate this model in our forthcoming work \cite{avenir_cancer}.\\
As already mentioned, the study of propagation of fronts and spreading properties in heterogeneous media is of intense current interest. For instance, the existence of fronts propagating in non-homogeneous geometries with obstacles has been established in Berestycki, Hamel and Matano \cite{BHM09}. Definitions of generalized waves have been given by Berestycki and Hamel in \cite{BH07} and \cite{BH12} where they are called generalized transition waves. Somewhat different approaches to generalizing the notions of traveling fronts have been proposed by H. Matano \cite{HM12} and W. Shen \cite{Shen}.
The existence of fronts for non-homogeneous equations is established in \cite{NRRZ12} and \cite{Z12}. \\
Let us first introduce some notations before stating the main results.
\begin{notations}
We note $x=(x_1,y) \in \mathbb{R}^N$ where $x_1\in \mathbb{R}$ and $y\in
\mathbb{R}^{N-1}$. Hence $x$ is the space variable in $\mathbb{R}^N$, $x_1$ is its first coordinate and $y$ is the vector of $\mathbb{R}^{N-1}$ composed of all the other coordinates of $x$.
As usual $B_R=B(0,R)$ denotes a ball of radius $R$ centered at 0, but here it will always mean the ball in $\mathbb{R}^{N-1}$.
\end{notations}
First we are interested in solutions of
\begin{equation}
\label{EP}
\begin{cases}
\displaystyle \frac{\partial u}{\partial t}- \Delta u = f(u)-\alpha g(y) u, \quad
x=(x_1,y)\in \mathbb{R}^N, \; t\in \mathbb{R} \\
u\geq 0, \quad u \text{ bounded,}
\end{cases}
\end{equation}
with $\alpha>0$. We will assume that $f:\mathbb{R} \rightarrow \mathbb{R} \text{ is } \mathrm{C}^1$ and satisfies either one of the following conditions:
$$
f(0)=f(1)=0,\; f>0 \text{ on } (0,1) \text{ and } f(s) < f'(0)s \text{ for } s\in (0,1),
$$
or
$$
\begin{array}{c}
\text{ there exists } \theta \in (0,1) \text{ such that } f(0)=f(\theta)=f(1)=0,\\
f<0 \text{ on } (0,\theta) \text{ and } f>0 \text{ on } (\theta,1)
\text{ and } f'(0)>0, \; f'(1)>0, \\
\int_0^1 f(s)ds>0.
\end{array}
$$
The first case will be referred to as the KPP case and the second one will be called bistable case. Since we are only interested in solutions of \eqref{EP} in $[0,1]$, we will further assume that $f(s)\leq 0$ for $s\geq 1$.
Moreover we always assume
\begin{equation}
\label{Hypgpos}
g:\mathbb{R}^{N-1} \rightarrow \mathbb{R}_+ \text{ is continuous, } g(0)=0, \; g>0 \text{ on }\mathbb{R}^{N-1}\setminus\{0\}
\end{equation}
and
\begin{equation}
\label{Hypginf}
\lim_{|y|\rightarrow +\infty}g(y)=+\infty.
\end{equation}
Taking $g(y)=|y|^2$ and $f(s)=as(1-s)$ yields the particular case of equation \eqref{DP3}.\\
This paper is concerned with the long term behavior of \eqref{EP} and with the existence of curved traveling fronts, i.e. solutions $u(t,x)=U(x_1-ct,y)$ with $c\in \mathbb{R}$ a constant and $U:\mathbb{R}^N \rightarrow \mathbb{R}$ such that the limits $\lim_{s\rightarrow \pm \infty}U(s,.)$ exist uniformly and are not equal. Regarding these fronts, our main results are the following.
\begin{theorem}
\label{existFP}
If $f$ is of KPP type, there exists $\alpha_0>0$ such that:
\begin{itemize}
\item For $\alpha \geq \alpha_0$, there exists no traveling front solution of \eqref{EP},
\item For $\alpha <\alpha_0$ there exists a threshold $c^*>0$ such that there exists a traveling front of speed $c$ of equation \eqref{EP} if and only if $c\geq c^*$.
\end{itemize}
\end{theorem}
This existence theorem gives us information on the behavior of the solution of the parabolic problem. In this paper we prove the following theorem:
\begin{theorem}
If $f$ is of KPP type, for $u_0\in L^\infty$, there exists a unique solution $u(t,x)$ of
$$
\begin{cases}
\partial_t u -\Delta u =f(u) -\alpha g(y) u \quad
&\text{on } (0,+\infty)\times \mathbb{R}^N,\\
u(0,x)=u_0(x) & \text{on } \mathbb{R}^N.
\end{cases}
$$
\begin{itemize}
\item If $\alpha\geq \alpha_0$, then $u(t,x) \xrightarrow{t\rightarrow
+\infty} 0$ uniformly with respect to $x\in \mathbb{R}^N$.
\item If $\alpha < \alpha_0$ and $u_0\in \mathrm{C}^0_0(\mathbb{R}^N)$ with $u_0< V$, where $V=V(y)$ is the unique positive asymptotic profile (stationary solution), then:
\begin{eqnarray*}
\text{for any }c>c^* \quad \lim_{t\rightarrow +\infty} \sup_{|x_1|\geq ct}
u(t,x)=0, \\
\text{for any } c \text{ with } 0\leq c <c^* \quad \lim_{t\rightarrow +\infty} \sup_{|x_1| < ct} |u(t,x)-V(y)|=0.
\end{eqnarray*}
\end{itemize}
\end{theorem}
This means that there is a threshold value $\alpha_0$ such that for $\alpha \geq \alpha_0$, there is {\em extinction}. On the contrary, when $\alpha < \alpha_0$, there is spreading and the state $V(y)$ invades the whole space. The asymptotic speed of spreading is then $c^*$.
The property of asymptotic spreading is in the same spirit as the theorem of asymptotic speed of spreading in cylinders established by Mallordy and Roquejoffre in \cite{MR95}.\\
Regarding the case of bistable $f$ we have the following result:
\begin{theorem}
\label{existFPb}
If $f$ is of bistable type, there exist $\alpha^* \geq \alpha_*>0$ such that
\begin{itemize}
\item For $\alpha\geq \alpha^*$, there exists no traveling front solution of \eqref{EP},
\item For $\alpha <\alpha_*$, under condition~\ref{uniqsPA} of Section~\ref{section: APbist}, there exists a traveling front $u$ of speed $c>0$ solution of \eqref{EP}.
\end{itemize}
\end{theorem}
Lastly, the model for CSD's leads one to equations of the type
\begin{equation}
\label{Egenintro}
\partial_t u -\Delta u=h(y,u) \quad x=(x_1,y)\in \mathbb{R}^N.
\end{equation}
where $h(y,u)$ is a function that verifies
\begin{eqnarray*}
&& h(y,u)=f(u) \text{ for } |y|\leq L_1 \\
&& h(y,u)\leq -mu \text{ for } |y|\geq L_2\\
&& h(y,u)+mu\xrightarrow{|y|\rightarrow +\infty} 0 \quad \text{uniformly for } u\in \mathbb{R}^+
\end{eqnarray*}
where $0<L_1\leq L_2 <\infty$ and $m>0$ are given parameters and $f$ is of bistable form. \\
In this paper we prove the following Theorem.
\begin{theorem}
\label{thmSDTF}
There exist critical radii $0<L_*\leq L^*<\infty$ with the following properties:
\begin{itemize}
\item For $L_2<L_*$, there is no traveling front solution of \eqref{Egenintro}.
\item For $L_1>L^*$ (independently of $L_2$), assuming that there is a unique stable asymptotic profile of \eqref{APSD}, there exists a traveling front of speed $c>0$ solution of \eqref{Egenintro}.
\end{itemize}
\end{theorem}
This result completes the study in \cite{C07} on the existence of CSD in the human brain. Indeed, in \cite{C07} the transition from gray to white matter was taken to be instantaneous, whereas biologically there is a smooth transition from
gray to white matter. This theorem confirms the intuition that CSD's can be found in parts of the human brain where the gray matter is sufficiently thick, but that they cannot propagate over large distances due to the thinness of the gray matter in many parts of the human brain.\\
The paper is organized as follows. In section \ref{section: prelim} we state some preliminary results that will be used in the sequel. Section \ref{section: asympt} is dedicated to the study of the existence and uniqueness of non-zero asymptotic profiles for a traveling front solution of \eqref{EP}. In section \ref{section: convergence} we study the large time behavior. There we prove extinction if $\alpha \geq \alpha_0$ and convergence towards the front of minimal speed if $\alpha <\alpha_0$. Then, section \ref{section: APbist} is devoted to the study of the asymptotic profiles in the bistable case and section \ref{section: TFbist} to the existence of traveling front for $\alpha<\alpha_*$ in the bistable case. Lastly, in section~\ref{section: CSD} we describe the precise problem arising in the modeling of CSD's and state our main result in this framework.
\section{Preliminary results}
\label{section: prelim}
In our proofs, we will several times need the exponential decay of the asymptotic profile which can be easily proved from the following theorem established in \cite{BR08}.
\begin{theorem}
\label{BR}
Let $v\in H^2_{\text{loc}}(\mathbb{R}^N)$ be a positive function. Assume that
there exists $\gamma>0$ and $C>0$ such that
$$
\forall x\in \mathbb{R}^N, \quad v(x)\leq Ce^{\sqrt{\gamma}|x|} \text{ and }
\liminf_{|x|\rightarrow \infty} \frac{\Delta v(x)}{v(x)}>\gamma.
$$
Then, $\displaystyle \lim_{|x|\rightarrow
\infty}v(x)e^{\sqrt{\gamma}|x|}=0$.
\end{theorem}
This result is established in \cite{BR08}, lemma 2.2.
In the context of equation \eqref{main}, we thus have the following corollary.
\begin{corollary}
\label{decry}
Let $v$ be a non-negative and bounded solution of
$$
\Delta v +f(v)-\alpha g v=0 \quad \text{on } \mathbb{R}^{N-1}.
$$
Then, for any $\gamma>0$ there exists $C>0$ such that
$$
0\leq v(y) \leq Ce^{-\gamma |y|} \qquad \text{and} \qquad
|\nabla v(y)|\leq Ce^{-\gamma |y|}.
$$
\end{corollary}
\begin{proof}
The estimate on $v$ comes directly from Theorem \ref{BR} and the estimate on $|\nabla v|$ derives from standard global $L^p$ estimates.
\end{proof}
\section{The case of a KPP non-linearity. Asymptotic profiles.}
\label{section: asympt}
In this section, we are interested in the asymptotic profiles of a
traveling front solution of \eqref{EP} as $x_1\rightarrow \pm \infty$. Hence, we are looking for solutions of the following equation
\begin{equation}
\label{PA}
\begin{cases}
\Delta V +f (V) -\alpha g(y) V=0, \quad & y\in \mathbb{R}^{N-1},
\\
V \geq 0, \quad V \text{ bounded.}
\end{cases}
\end{equation}
We assume that $f:\mathbb{R} \rightarrow \mathbb{R} \text{ is } \mathrm{C}^1$,
\begin{equation}
\label{Hypfpos}
f(0)=f(1)=0,\; f>0 \text{ on } (0,1)
\end{equation}
and
\begin{equation}
\label{HypfKPP}
f(s) < f'(0)s \text{ for } s\in (0,1) \text{ and } s\in (0,1]\mapsto \frac{f(s)}{s} \text{ decreasing.}
\end{equation}
Since the constant function 0 is always a solution, the problem
is to know when there exist non-zero solutions. As we will see here, the existence of such a positive asymptotic profile is characterized by the
sign of the principal eigenvalue of the linearized operator around 0. We now make precise this notion.
\subsection{Principal eigenvalue of the linearized operator}
\label{principal}
To start with, let us define the natural weighted space $$\mathcal{H}=\{v\in
H^1(\mathbb{R}^{N-1}) \, , \, \sqrt{g}v\in
L^2(\mathbb{R}^{N-1})\}$$ and its associated norm. For $v\in \mathcal{H}$, we set $\|v\|_\mathcal{H} = (
\|v\|^2_{H^1}+\|\sqrt{g}v\|^2_{L^2})^{\frac{1}{2}}$.
The linearized operator about 0 is $L\varphi=-\Delta \varphi
+\big(\alpha g(y)-f'(0)\big)\varphi$ for $\varphi \in \mathcal{H}$. We are interested in the
eigenvalues of $L$. Even though the problem is set on all of $\mathbb{R}^{N-1}$, the term
in $\alpha g(y)$ yields compactness of
the injection $\mathcal{H} \hookrightarrow L^2(\mathbb{R}^{N-1})$. Hence the existence
of a principal eigenvalue is obtained as usual.
\begin{theorem} Let us define
$$
R_\alpha(\varphi)= \frac{\int |\nabla \varphi|^2+
\big(\alpha g-f'(0)\big)\varphi^2 }{\int \varphi ^2}.
$$
The operator $L$ has a smallest eigenvalue
\begin{equation}
\lambda_\alpha= \inf_{\varphi \in \mathcal{H} \setminus \{0\} } R_\alpha (\varphi).
\label{QR}
\end{equation}
Moreover there exists a unique positive eigenfunction associated
to $\lambda_\alpha$ of $L^2$-norm equal to 1, called $\varphi_\alpha$ in the following.
The eigenspace associated to $\lambda_\alpha$ is spanned by $\varphi_\alpha$.
\end{theorem}
\begin{proof}
The proof is classical due to the compactness of $\mathcal{H}
\hookrightarrow L^2(\mathbb{R}^{N-1})$. We refer for example to \cite{Evans}.
\end{proof}
\begin{remark}
If $g(y)=|y|^2$, the problem can be rescaled and we obtain the harmonic
oscillator for which principal eigenvalue and eigenfunction are well
known \cite{schwartz}. In that case, we have $\lambda_\alpha=(N-1)\sqrt{\alpha}-f'(0)$ and
$\varphi_\alpha=\left( \frac{\sqrt{\alpha}}{\pi}\right)^{\frac{N-1}{4}}
e^{-\frac{\sqrt{\alpha}}{2}|y|^2}$.
\end{remark}
Since the existence of a positive solution of \eqref{PA} will depend on the
sign of the principal eigenvalue, the following proposition describes the
behavior of $\lambda_\alpha$ as a function of $\alpha$.
\begin{proposition}
\label{lacomp}
The function $\alpha \mapsto \lambda_\alpha$ is continuous, increasing and concave
for $\alpha \in (0, +\infty)$. Moreover $\lim_{\alpha \rightarrow 0}
\lambda_\alpha =-f'(0)$ and for $\alpha$ large enough $\lambda_\alpha>0$.
\end{proposition}
\begin{proof}
Let us fix $\alpha >0$ and $\eta>0$.
Equation \eqref{QR} shows that
$$
\lambda_{\alpha + \eta} \leq \int |\nabla \varphi_\alpha|^2 +\big(
(\alpha+\eta)g-f'(0)\big) \varphi_\alpha^2 =\lambda_\alpha +\eta \int g(y) \varphi_\alpha^2.
$$
Similarly, we obtain $\lambda_\alpha \leq \lambda_{\alpha +\eta} -\eta \int g
\varphi_{\alpha+\eta}^2$. From this we derive that
$$
0< \eta \int g \varphi_{\alpha+\eta}^2 \leq \lambda_{\alpha
+\eta}-\lambda_\alpha \leq \eta \int g\varphi_\alpha^2.
$$
This and a similar computation for $\lambda_\alpha - \lambda_{\alpha-\eta}$ yield that $\alpha \mapsto \lambda_\alpha$ is increasing and locally Lipschitz on $(0,+\infty)$.
Concavity is classical. It suffices to observe that for each fixed $\varphi$, $$
\alpha \mapsto R_\alpha(\varphi)= \frac{\int |\nabla \varphi|^2+
(\alpha g-f'(0))\varphi^2 }{\int \varphi ^2}
$$
is an affine function of $\alpha$ and that $\lambda_\alpha= \inf_{\varphi \in \mathcal{H} \setminus \{0\} } R_\alpha (\varphi)$.
In order to prove that $\lambda_\alpha \xrightarrow{\alpha \rightarrow 0}-f'(0)$,
for any $\varepsilon >0$ choose a function $\psi_\varepsilon
$ of compact support with
$\|\psi_\varepsilon\|_{L^2}=1$ and $\int |\nabla \psi_\varepsilon|^2< \varepsilon$. Let $\mathop{\mathrm{supp}}\nolimits \psi_\varepsilon \subset B_{R_\varepsilon}$.
From \eqref{QR} we get
\begin{equation*}
-f'(0) \leq \lambda_\alpha \leq \varepsilon+ \alpha \max_{B_{R_\varepsilon}}g-f'(0)
\end{equation*}
So for any $\displaystyle \alpha <\frac{\varepsilon}{\max_{B_{R_\varepsilon}}g}$,
$$
-f'(0)\leq \lambda_\alpha \leq -f'(0)+2\varepsilon.
$$
Now we claim that $\lambda_\alpha >0$ for large enough $\alpha$. Argue by contradiction and assume that $\lambda_\alpha \leq 0$ for all $\alpha \in
(0,+\infty)$. Since
$$
0\geq \lambda_\alpha=\int |\nabla \varphi_\alpha |^2+\alpha \int g \varphi_\alpha^2 - f'(0),
$$
we deduce that
$$
\int g \varphi_\alpha^2 \leq \frac{1}{\alpha} f'(0)
$$
and $\varphi_\alpha \rightarrow 0$ in $L^2(\mathbb{R}^{N-1} \setminus B_R)$ for all $R>0$. Furthermore, $\varphi_\alpha$ is bounded in $\mathcal{H}$ and up to extraction we can assume that $\varphi_\alpha$ converges strongly in $L^2(\mathbb{R}^{N-1})$; thus $\varphi_\alpha$ converges to $0$ in $L^2$, which is impossible since $\int {\varphi_\alpha}^2=1$ for any $\alpha>0$.
\end{proof}
\begin{corollary}
\label{alpha0}
There exists $\alpha_0 >0$ such that $\lambda_\alpha <0$ for $\alpha <\alpha_0$, $\lambda_{\alpha_0}=0$
and $\lambda_\alpha > 0$ for $\alpha >\alpha_0$.
\end{corollary}
\subsection{If $g$ vanishes on $B_{r_0}$}
The main part of the proof is still correct if $g$ vanishes on $B_{r_0}$ but the result is slightly modified.
In this section, we assume that there exists $r_0>0$ such that \eqref{Hypgpos} is substituted by the following assumption
\begin{equation}
\label{Hypg'}
g:\mathbb{R}^{N-1} \rightarrow \mathbb{R}_+ \in \mathrm{C}^0, \; g\equiv 0 \text{ on } B_{r_0} \text{ and } g>0 \text{ on }\mathbb{R}^{N-1}\setminus B_{r_0}.
\end{equation}
We define $\lambda_\Delta$ the principal eigenvalue of the Laplacian on $B_{r_0}$ with Dirichlet boundary conditions, i.e.
$$
\begin{cases}
-\Delta \phi_0=\lambda_\Delta \phi_0 & \text{on }B_{r_0},\\
\phi_0=0 & \text{on } \partial B_{r_0}.
\end{cases}
$$
In this case, the principal eigenvalue of the linearized operator about 0 is well defined and Proposition \ref{lacomp} becomes
\begin{proposition}
The function $\alpha \mapsto \lambda_\alpha$ is continuous, increasing and concave
for $\alpha \in (0, +\infty)$, and $\lim_{\alpha \rightarrow 0}
\lambda_\alpha =-f'(0)$. Now there are two cases:\\
i) If $f'(0) < \lambda_\Delta$, then for $\alpha$ large enough $\lambda_\alpha>0$. \\
ii) If $f'(0)>\lambda_\Delta$, then $\lambda_\alpha\leq 0$ for all $\alpha>0$.
\end{proposition}
\begin{proof}
The proof of the first part of the proposition is exactly the same as in Proposition \ref{lacomp}. We just have to prove i) and ii).
i) We assume that $f'(0) < \lambda_\Delta$ and argue by contradiction, supposing that $\lambda_\alpha \leq 0$ for any $\alpha \in (0, +\infty)$. As in the proof of proposition \ref{lacomp}, we have
$$
\int g \varphi_\alpha^2 \leq \frac{1}{\alpha} f'(0)
$$
but this yields $\varphi_\alpha \rightarrow 0$ in $L^2(\mathbb{R}^{N-1}\setminus B_R)$ for $\alpha \rightarrow +\infty$ and any $R>r_0$.
As before $\varphi_\alpha$ is bounded in $\mathcal{H}$ and up to extraction, we have $\lambda_\alpha \rightarrow \lambda\leq 0$ and the weak convergence in $\mathcal{H}$ and strong convergence in $L^2$ of $\varphi_\alpha$ toward $\phi$. The limit $\phi$ verifies $\int \phi^2=1$, $\phi \equiv 0$ for $|y|>r_0$ and
$$
-\Delta \phi-f'(0) \phi=\lambda \phi
$$
Thus $\phi \in H^1_0(B_{r_0})$ is a non-negative, non-trivial solution of $-\Delta \phi=(f'(0)+\lambda)\phi$ on $B_{r_0}$, so that $f'(0)+\lambda=\lambda_\Delta$; since $\lambda\leq 0$ and $f'(0)<\lambda_\Delta$, this is impossible.
ii) By taking $\varphi=\phi_0$ in the Rayleigh quotient \eqref{QR}, where $\phi_0$ is the principal eigenfunction of the above problem in $B_{r_0}$ with Dirichlet boundary conditions, we see that $\lambda_\alpha\leq \lambda_\Delta -f'(0) <0$ for all $\alpha>0$.
\end{proof}
In the following, we will not state the results specifically for this case \eqref{Hypg'}. However, the proofs and results developed here carry over to this case with the obvious modifications.
\subsection{Existence of non-zero asymptotic profile}
\begin{theorem}
For $\alpha \geq \alpha_0$, there is no non-zero solution of \eqref{PA}, where $\alpha_0$ is defined in corollary \ref{alpha0}.
For $\alpha < \alpha_0$, there exists a unique positive
solution of \eqref{PA}.
\end{theorem}
\begin{proof}
Let us fix $\alpha \geq \alpha_0$. Then $\lambda_\alpha \geq 0$.
Assume by contradiction that there exists a solution $V$
of \eqref{PA}. Then the strong maximum principle shows that $V>0$.
Since $\varphi_\alpha$ is an eigenfunction of the linearized operator $L$ and
$V$ is solution of \eqref{PA}, we have
\begin{eqnarray*}
\int \left(\Delta V +f(V)-\alpha gV \right)\varphi_\alpha & = & 0 \\
& = & \int (\Delta \varphi_\alpha +(f'(0)-\alpha g)\varphi_\alpha + \lambda_\alpha \varphi_\alpha)V
\end{eqnarray*}
Now from corollary \ref{decry}, $V$ and
$\nabla V$ are rapidly decreasing for
$|y|\rightarrow \infty$ and so we can apply Stokes formula
$\int \Delta V \, \varphi_\alpha= \int V \,\Delta \varphi_\alpha$. It yields
$\displaystyle \int (f(V)-f'(0)V) \varphi_\alpha = \lambda_\alpha \int \varphi_\alpha V$ but
$f(V)-f'(0)V<0$ since $f$ is of KPP type and $ \lambda_\alpha \geq 0$ thus a
contradiction is obtained.
\bigskip
We now turn to the case $\alpha<\alpha_0$.
For $\alpha<\alpha_0$, the eigenvalue $\lambda_\alpha$ is negative. Setting
$\underline{V}=\varepsilon \varphi_\alpha$ with $\varepsilon>0$,
we get
$$
\Delta \underline{V}+f(\underline{V}) - \alpha
g \underline{V}=-\lambda_\alpha \varepsilon \varphi_\alpha+f(\varepsilon\varphi_\alpha)-f'(0)\varepsilon\varphi_\alpha \geq 0
$$
if $\varepsilon>0$ is chosen small enough.
Hence $\underline{V}$ is a sub-solution of \eqref{PA}. The constant function 1 is
a super-solution and
$\underline{V}\leq 1$ if $\varepsilon$ is small enough. Therefore by the sub- and super-solution method,
there exists a
solution $V$ such that $0<\underline{V}\leq V \leq 1$.
Now consider $V$ and $W$ two non-zero solutions of
\eqref{PA}. We argue by contradiction and assume that $V \not\equiv W$. Then
for example $\Omega =\{ y\in \mathbb{R}^{N-1}, \,
V(y)<W(y)\}$ is not empty. Introduce a cutoff function
$\beta \in \mathrm{C}^\infty (\mathbb{R})$ with $\beta=0$ on $(-\infty, 1/2]$,
$\beta=1$ on $[1,+\infty)$ and $0<\beta'<4$ on $(1/2,1)$ and for all $\varepsilon
>0$, let us set $\beta_\varepsilon(s)=\beta \left( \frac{s}{\varepsilon} \right)$.
Using equation \eqref{PA}, we have
\begin{eqnarray*}
\int (-V \Delta W+ \Delta V W)\beta_\varepsilon(W - V)
&=& \int (V f(W)-f(V)W)\beta_\varepsilon(W - V) \\
& &\xrightarrow{\varepsilon \rightarrow 0} \int_{\Omega}(V f(W)-f(V)W)
\end{eqnarray*}
by Lebesgue's dominated convergence theorem.
Owing to corollary \ref{decry}, $V$, $\nabla V$, $W$ and $\nabla W$ have exponential decay and thus Stokes
formula can be applied and we obtain
\begin{eqnarray*}
\int (-V \Delta W+ \Delta V W)\beta_\varepsilon(W - V)
= \int \beta_\varepsilon '(W -V) \nabla
(W-V) .\left( V \nabla W - W \nabla V \right)\qquad \\
= \underbrace{\int \beta_\varepsilon ' (W -V) V |\nabla (W
-V)|^2}_{=I_1} - \underbrace{\int \beta_\varepsilon ' (W -V)(W -V) \nabla (W
-V). \nabla V}_{=I_2} .
\end{eqnarray*}
In the term $I_2$ the integrand satisfies
$$
|\beta_\varepsilon ' (W -V)(W-V)\nabla(W-V).\nabla V| \leq 4|\nabla(W-V)|. |\nabla V|
$$
Therefore by Lebesgue's Theorem of dominated convergence, we infer that $I_2\rightarrow 0$. Next the term $I_1$ satisfies $I_1\geq 0$. Consequently, we may write:
$$
0\leq \int_{\Omega}\big( V f(W)-W f(V) \big)=\int_{\Omega}\left( \frac{f(W)}{W}-\frac{f(V)}{V}\right) VW
$$
which is a contradiction in view of \eqref{HypfKPP} as $W>V$ in $\Omega$. Hence $V=W$ and
the non-zero solution is unique.
\end{proof}
The last point concerns the stability of the asymptotic profiles
for $\alpha <\alpha_0$. Let us start by studying the energy of
$V$. For $w\in \mathcal{H}$, we define the energy
\begin{equation}
J_\alpha (w)=\int_{\mathbb{R}^{N-1}} \frac{1}{2}|\nabla w(y)|^2 +\frac{\alpha}{2} g(y) w^2(y) -
F(w(y)) \,dy
\label{defenergie}
\end{equation}
where $F(u)=\int_0^u f(t)dt$.
\begin{theorem}
For $\alpha<\alpha_0$, the unique positive solution
of \eqref{PA} $V$ is stable in the energy sense, i.e. $V$ is the
global minimum of $J_\alpha$ and, furthermore $J_\alpha (V) <0=J_\alpha(0)$.
\label{energy}
\end{theorem}
\begin{proof}
Owing to the maximum principle, solutions of \eqref{PA} are between 0 and
1. Hence we can modify $f$ on $]-\infty,0[$ such that it becomes odd
and as a consequence, $F$ can be considered as even.
Since $\lambda_\alpha$ the principal eigenvalue of the linearized operator
about the zero solution is negative for $\alpha<\alpha_0$, 0 cannot be the global minimum of $J_\alpha$. Now $J_\alpha$ admits a global minimum that
will be called $\tilde{V}$ for the argument. One can prove that $|\tilde{V}|$ is
also a global minimum of $J_\alpha$ and hence $|\tilde{V}|$ is a positive
solution of \eqref{PA}. By uniqueness, $|\tilde{V}|=V$ and thus
$V$ is a global minimum of $J_\alpha$. Since $0$ is not a
global minimum, necessarily $J_\alpha (V) <0=J_\alpha(0)$.
\end{proof}
We now conclude with the linearized stability of $V$.
\begin{theorem}
For $\alpha <\alpha_0$, consider the linearized operator about
$V$ and denote $\lambda_1[V]$ the principal
eigenvalue of this operator. Then $\lambda_1[V]> 0$.
\end{theorem}
\begin{proof}
Denote by $\psi$ a positive eigenfunction associated with
$\lambda_1[V]$ and assume by contradiction that
$\lambda_1[V]\leq 0$. If $\lambda_1[V]<0$, it is easy to see that for
$\varepsilon>0$ small enough $V +\varepsilon \psi<1$ is a
sub-solution of \eqref{PA}. From there, it would follow that there exists a solution
of \eqref{PA} between $V +\varepsilon \psi$ and $1$ but this
contradicts the uniqueness of $V$.
Now assume that $\lambda_1[V]=0$.
With the same notations as above, $\psi$ and $\phi=\varphi_\alpha$ (the positive principal eigenfunction of the linearized operator about $0$) verify
\begin{equation}
\label{psi}
-\Delta \psi =f'(V)\psi-\alpha g(y) \psi
\end{equation}
and
\begin{equation}
\label{phia}
-\Delta \phi =(f'(0)+\lambda_\alpha)\phi-\alpha g(y) \phi
\end{equation}
Multiplying \eqref{psi} by
$\phi$ and \eqref{phia} by $\psi$ and integrating by parts yields
$$
\int f'(V) \psi \phi =\int (f'(0)+\lambda_\alpha) \psi \phi.
$$
Since $\phi \psi >0$, we obtain $f'(V(y))=f'(0)+\lambda_\alpha$ for all $y\in \mathbb{R}^{N-1}$ and this is impossible since $\lambda_\alpha <0$ and $V(y)\rightarrow 0$ for $|y|\rightarrow +\infty$.
\end{proof}
\section{Traveling fronts for a KPP non-linearity}
\label{section: cetoile}
This section is devoted to the definition of a speed $c^*$ for which a
traveling front of equation \eqref{EP} exists for $\alpha\in (0,\alpha_0)$. The threshold
of existence of the non-zero asymptotic profile is called
$\alpha_0$ as in the previous section. For $0<\alpha <\alpha_0$,
$V$ denotes the unique non-zero asymptotic
profile. As shown in the previous section, the
energy of the non-zero profile $J_\alpha(V)$ is
negative.
A curved traveling front of speed $c$ is a function
$u(x_1-ct,y)$ solution of
equation \eqref{EP} and connecting the non-zero asymptotic state $V$
to 0. Thus we are looking for a solution of
\begin{equation}
\begin{cases}
-\Delta u - c\partial_1 u + \alpha g(y) u=f(u), \quad x=(x_1,y)\in
\mathbb{R}^N \\
u(x_1,.)\xrightarrow{x_1\rightarrow -\infty} V, \quad
u(x_1,.)\xrightarrow{x_1\rightarrow +\infty} 0 \; \text{uniformly in }
y\in \mathbb{R}^{N-1},\\
u\geq 0, \quad u \text{ bounded}
\end{cases}
\label{FPc}
\end{equation}
where $c\in \mathbb{R}$ is also an unknown of the problem.
The construction of $c^*$ in Theorem \ref{existFP} uses the sliding method following ideas of \cite{BN91}. Note however that there are important differences with \cite{BN91}. In that paper, the KPP case is treated by first solving the problem for a ``combustion non-linearity'' and then approaching the KPP non-linearity as a limiting case of truncated functions. Contrary to \cite{BN91}, here we derive directly the existence of a solution in the KPP case. Actually the method we present here can be applied to somewhat simplify the proof of \cite{BN91} in the KPP case for cylinders with Neumann conditions.
\subsection{Problem on a domain bounded in $x_1$.}
Let us fix $a>1$ and $c\in \mathbb{R}$ for this subsection and consider the
following problem:
\begin{equation}
\begin{cases}
-\Delta u - c\partial_1 u + \alpha g(y) u=f(u), \quad x=(x_1,y)\in
(-a,a)\times \mathbb{R}^{N-1} \\
u(-a,\cdot)=V, \quad u(a,\cdot)= 0, \\
u \geq 0, \quad u \text{ bounded}.
\end{cases}
\label{FPB}
\end{equation}
The aim of this subsection is to prove the following theorem:
\begin{theorem}
\label{boite}
There exists a unique solution of (\ref{FPB}), denoted $u_a^c$ in
the following. This solution decreases in the $x_1$-direction,
i.e. $\partial_1 u_a^c<0$. Thus $0 <u_a^c<V$ on $(-a,a)\times
\mathbb{R}^{N-1}$. Moreover $c \mapsto u_{a}^c$ is decreasing and
continuous from $\mathbb{R}$ to $L^\infty([-a,a]\times
\mathbb{R}^{N-1})$.
\end{theorem}
To prove this theorem, we require the following two
propositions.
\begin{proposition}
\label{majoration}
Let $u$ be a solution of \eqref{FPB}. Then $u(x_1,y) \leq V(y)$ for
$(x_1,y)\in [-a,a]\times \mathbb{R}^{N-1}$.
\end{proposition}
\begin{proof}
Let $M\geq 1$ be such that $u\leq M$ and consider $\psi_R$ defined on
$B_R$ the largest solution of
\begin{equation}
\label{psiR}
\begin{cases}
-\Delta_y \psi_R +\alpha g(y)\psi_R=f(\psi_R) \quad \text{for } y\in B_R,\\
\psi_R=M \quad \text{for }y \in \partial B_R, \qquad 0 \leq \psi_R
\leq M.
\end{cases}
\end{equation}
Here we think of $f$ as having been extended by $0$ outside $[0,1]$. Since $f(s)\leq 0$ for all $s\geq 1$, we observe that:
\begin{itemize}
\item by the strong maximum principle, $0<\psi_R<M$ on $B_R$.
\item since $V \leq 1 \leq M$ and $V$ is a sub-solution of
\eqref{psiR}, through monotone iterations we have $V\leq \psi_R$.
\item if $R'>R$, $\psi_{R'}$ is once again a sub-solution of
\eqref{psiR} on $B_R$ and thus $\psi_{R'}\leq \psi_R$ on $B_R$.
\item therefore $\psi_R$ tends to a function when
$R\rightarrow +\infty$ and through local elliptic estimates, this
function is a non-zero solution ($\geq V$) of the asymptotic problem
\eqref{PA}. By uniqueness, we obtain $\psi_R
\xrightarrow{R \rightarrow +\infty} V$
\end{itemize}
Now we consider the problem
\begin{equation}
\label{recw}
\begin{cases}
-\Delta w -c\partial_1 w+\alpha
g(y)w=f(w) \quad \text{ for } x \in (-a,a)\times B_R,\\
w=M \quad \text{for }x\in (-a,a)\times \partial B_R, \qquad w=\psi_R \quad
\text{for } x_1=\pm a, y\in B_R.
\end{cases}
\end{equation}
The solution $u$ of \eqref{FPB} is a sub-solution of \eqref{recw} and
the constant function $M$ is a super-solution. Using monotone iterations
starting from the super-solution $M$, we build the same sequence as
previously (for problem \eqref{psiR}) since by induction the solutions
do not depend on $x_1\in (-a,a)$. Hence the sequence converges toward
$\psi_R$ and we have $u\leq \psi_R \leq M$. Now letting $R \rightarrow +\infty$
yields $u\leq V$.
\end{proof}
\begin{proposition}
\label{comparison}
Let $R$ be such that $g(y) >\frac{K}{\alpha}$ for $y \not\in \overline{B_R}$ where $K$ is the
Lipschitz norm of $f$ on $[0,1]$. We set $\Omega=I\times
(\mathbb{R}^{N-1}\setminus \overline{B_R})$ where $I$ is an open bounded interval
of $\mathbb{R}$.
Suppose $u$ and $v\in \mathrm{C}^2(\Omega)\cap \mathrm{C}^0(\overline{\Omega})$ are
solutions of
\begin{equation}
\label{jeq}
-\Delta w - c\partial_1 w + \alpha g(y) w=f(w) \quad \text{on } \Omega
\end{equation}
and $u\leq v$ on $\partial \Omega$. Then $u\leq v$ on $\Omega$.
\end{proposition}
\begin{proof}
By contradiction, suppose this is not true. Due to corollary
\ref{decry} and proposition \ref{majoration}, $u(x_1,y)$ and
$v(x_1,y)$ converge uniformly to 0 for $|y|\rightarrow +\infty$. Consequently, there
exist $(x_0,y_0)\in \Omega$ such that
$$
0>\min_{\overline{\Omega}} (v-u) =(v-u)(x_0,y_0).
$$
Since $(x_0,y_0)\in \Omega$, we have $\partial_1 (v-u)(x_0,y_0)=0$
and $\Delta (v-u)(x_0,y_0)\geq 0$, and subtracting the equation \eqref{jeq} with $u$
from the one with $v$, we obtain
$$
\alpha g(|y_0|)(v-u)(x_0,y_0) \geq f(v(x_0,y_0))-f(u(x_0,y_0)) \geq -K
|(v-u)(x_0,y_0)|
$$
which is impossible since $\alpha g(|y_0|) >K$ and $(v-u)(x_0,y_0)<0$.
\end{proof}
Let us now turn to the proof of Theorem \ref{boite} using
sliding method.
\medskip\\
First $\overline{u}(x,y)=V(y)$ is a super-solution, 0 is a
sub-solution and $0\leq \overline{u}$, so by monotone iterations, there
exists a solution $u$ of \eqref{FPB}.
\begin{lemma}
Assume $u$ and $v$ are two
solutions of (\ref{FPB}). Then
$$
v(x_1+h,y)\leq u(x_1,y) \text{ for all } h \in [0,2a) \text{ and all } (x_1,y)\in [-a,a-h]\times \mathbb{R}^{N-1}.
$$
\end{lemma}
\begin{proof}[Proof of the lemma]
By proposition \ref{majoration}, we have
$0\leq u\leq V$ (resp. $0\leq v \leq V$) and using the strong
maximum principle, we obtain $0<u<V$ (resp. $0<v<V$) on
$(-a,a)\times \mathbb{R}^{N-1}$.
For $h\in [0,2a)$, let $I_h=(-a,a-h)$ and for $(x_1,y)\in \overline{I_h}\times \mathbb{R}^{N-1}$, set $v_h(x_1,y)=v(x_1+h,y)$.
Let us fix $R>0$ such that $g(y
)>\frac{K}{\alpha}$ for $y \not\in \overline{B_R}$. By compactness and
continuity of $u$ and $v$, there exists $\varepsilon>0$ such that $v_h \leq
u$ on $\overline{I_h} \times \overline{B_R}$ for any $h$ such that $2a-\varepsilon\leq h < 2a$. Proposition \ref{comparison} shows that $v_h \leq u$ on $\overline{I_h} \times
\mathbb{R}^{N-1}$ for any $h\geq 2a-\varepsilon$.
This enables us to define $$h^*= \inf \{h\geq 0, \, v_h\leq u \text{ on }
\overline{I_h} \times \mathbb{R}^{N-1}\}.$$
Let us prove that $h^*=0$ and argue by contradiction that $h^*>0$.
By continuity, $v_{h^*}\leq u$ on $\overline{I_{h^*}} \times \mathbb{R}^{N-1}$.
Suppose that $\displaystyle \min_{I_{h^*} \times B_R} u-v_{h^*}>0$. This would imply that for $h^*-h>0$ small, $\displaystyle \min_{I_{h} \times B_R} u-v_{h}>0$ and by Proposition \ref{comparison}, $v_h\leq u$ on $I_h \times \mathbb{R}^{N-1}$ in contradiction with the definition of $h^*$. Therefore $\displaystyle \min_{I_{h^*} \times B_R} u-v_{h^*}=0$. This implies the existence of $(x_1^*,y^*)\in I_{h^*}\times \overline{B_R}$ such that $v_{h^*}(x_1^*,y^*)=u(x_1^*,y^*)$ (note that $u-v_{h^*}>0$ for $x_1=-a$ or $a-h^*$). Writing in the usual way that $u-v_{h^*}$ is solution of a linear elliptic equation in $I_{h^*}\times \mathbb{R}^{N-1}$ and $u-v_{h^*}\geq 0$ with $u-v_{h^*}$ vanishing at the point $(x_1^*,y^*)$, the strong maximum principle implies that $u-v_{h^*}\equiv 0$ which is impossible.
\end{proof}
Applying the preceding lemma with $h=0$ yields the uniqueness of
the solution of \eqref{FPB}. Taking $u=v=u_a^c$, one sees that $u_a^c$ is monotone decreasing. Thus $\partial _1 u_{a}^c \leq 0$ and differentiating
equation \eqref{FPB} and applying once more the maximum principle gives
$\partial _1 u_{a}^c<0$.
It remains to study the behavior of $u_{a}^c$ with respect to
$c$. The continuity is deduced from the uniqueness of the
solution and a priori estimates in the standard way. Now let $c_1<c_2$ and denote by $u_1$ (resp. by $u_2$) the
solution of \eqref{FPB} with $c=c_1$ (resp. $c=c_2$). Since $\partial _1 u_1<0$,
$$
\Delta u_1 +c_2 \partial_1 u_1 +f(u_1)-\alpha g(y)
u_1=(c_2-c_1)\partial_1 u_1 <0
$$
and $u_1>0$ is a super-solution of equation \eqref{FPB} with
$c=c_2$. By uniqueness of the solution, necessarily $u_2\leq
u_1$. Once more the strong maximum principle implies $u_2<u_1$.
\subsection{Convergence to a solution on $\mathbb{R}^N$}
Now that the equation is solved on a domain bounded in the $x_1$-direction, the idea is
to increase $a$ up to infinity so that the domain
tends to $\mathbb{R}^N$. However if $c$ is chosen
arbitrarily, the function $u_a^c$ may converge toward the constant 0
or to $V$ when $a$ tends to infinity. Hence we adopt a normalization method as in \cite{BN91}. The following
theorem will define the value of the speed $c$ depending on $a$ to
avoid those situations. We recall that since $\alpha<\alpha_0$,
$\lambda_\alpha$ the principal eigenvalue of the linearized operator
about the solution 0 is negative.
\hspace{-7mm}
\begin{minipage}{0.62\textwidth}
\begin{theorem}
\label{ca}
Let us fix $\varepsilon>0$. Let $\delta >0$ be such that $\delta
< -\lambda_\alpha\leq f'(0)$. Let $\eta >0$ be such that $\forall
s\in [0, \eta] \; f(s)\geq (f'(0)-\delta)s$. We fix $\theta \in
(0, \frac{\eta}{2})$.\\
Then there exists $A(\varepsilon)>0$ such that for all $a\geq A(\varepsilon)$, there exists a
unique speed $c_a\in (0,2\sqrt{-\lambda_\alpha}+\varepsilon]$ with
$u_a^{c_a}(0,0)=\theta$.
\end{theorem}
\end{minipage} \hfill
\begin{minipage}{0.35\textwidth}
\resizebox{\textwidth}{!}{\input{eta.pspdftex}}
\end{minipage}
\begin{proof}
By continuity and monotonicity, it suffices to prove:
\begin{enumerate}[i)]
\item $u_a^0(0,0)>\theta$,
\item $u_a^c(0,0)<\theta$ for $c>2\sqrt{-\lambda_\alpha}+\varepsilon$
and $a$ large enough.
\end{enumerate}
i) Assume $c=0$. Let $\varphi_\alpha$ be the positive eigenfunction
of the linearized operator $L$ associated with the first eigenvalue
$\lambda_\alpha <0$ and with the normalization $\|\varphi_\alpha\|_\infty=1$.
Let us introduce
$v(x_1,y)=h(x_1)\varphi_\alpha(y)$ for $(x_1,y)\in [-a,a]\times \mathbb{R}^{N-1}$ where
$h(x_1)=\eta \frac{a-x_1}{2a}$. Then $0\leq v\leq \eta$ on $[-a,a]\times \mathbb{R}^{N-1}$,
which yields
\begin{eqnarray*}
-\Delta v + \alpha g(y) v-f(v) &\leq& -\Delta v+\alpha g(y)v -
(f'(0)-\delta)v \\
& = & (\lambda_\alpha+\delta)v\leq 0.
\end{eqnarray*}
Moreover by construction of $V$ (cf section \ref{section: asympt}), $v(-a,y)=\eta \varphi_\alpha (y)<V(y)$ if $\eta$ is small enough. Then
$v(a,y)=0$ and $v\leq 1$. Hence, $v$ is a sub-solution of \eqref{FPB} for $c=0$. Thus
$v\leq u_a^0$ and therefore, $u_a^0(0,0) \geq
v(0,0)=\frac{\eta}{2}>\theta$.
ii) Let us construct an explicit super-solution for
$c>2\sqrt{-\lambda_\alpha}+\varepsilon$. We recall from section \ref{principal} that
$\lambda_\beta <\lambda_\alpha$ for $\beta<\alpha$ and that
$\lim_{\beta\rightarrow \alpha}\lambda_\beta=\lambda_\alpha$.
Thus there exists $\beta\in
(0,\alpha)$ such that $2\sqrt{-\lambda_\beta}\leq 2\sqrt{-\lambda_\alpha}+\varepsilon$.
As before, let $\psi_\beta$ denote the positive eigenfunction
of the linearized operator $L$ associated with the first eigenvalue
$\lambda_\beta <0$ with the normalization $\|\psi_\beta
\|_\infty=1$. Choose $R$ such that for all $r\geq R$, $(\alpha
-\beta)g(r)+\lambda_\beta>0$ and $\alpha g(r)>f'(0)$, and choose $k>0$ such
that $k \psi_\beta \geq V$ on $\overline{B}_R$. The constant $k$ only depends
on $\beta$ hence on $\varepsilon$.
\begin{lemma}
Then $k \psi_\beta\geq V$ on $\mathbb{R}^{N-1}$.
\end{lemma}
\begin{proof}[Proof of the lemma]
We follow a proof similar to that of lemma \ref{comparison}.
If the lemma does not hold, then since
$k\psi_\beta-V$ tends to 0 at $\infty$, there exists $y_0 \in\mathbb{R}^{N-1}\setminus \overline{B}_R$
such that $(k\psi_\beta-V)(y_0)=\min_{\mathbb{R}^{N-1}} (k\psi_\beta-V)<0$. At
this point, $\Delta (k\psi_\beta-V)\geq 0$
but
\begin{eqnarray*}
-\Delta (k\psi_\beta-V)+\alpha g(y)
(k\psi_\beta-V)-f'(0)(k\psi_\beta-V)
= \qquad \qquad \qquad \qquad \\
\big((\alpha-\beta)g(|y_0|) +\lambda_\beta\big) \psi_\beta
+f'(0)V-f(V)
\geq \big((\alpha-\beta)g(|y_0|)+\lambda_\beta\big)\psi_\beta.
\end{eqnarray*}
By the choice of $R$, we get $\Delta (k\psi_\beta-V)(y_0)<0$ which yields
a contradiction.
\end{proof}
Let us now build the super-solution when $c>2\sqrt{-\lambda_\alpha}+\varepsilon$. We set
$w(x_1,y)=z(x_1)k\psi_\beta(y)$ where $z$ is the solution of
$
\begin{cases}
z''+cz'-\lambda_\beta z=0 \quad \text{on } (-a,a), \\
z(-a)=1, \quad z(a)=0.
\end{cases}
$
Then $w$ verifies
$$
\begin{cases}
-\Delta w-c\partial_{1}w+\alpha
g(y)w=(\alpha-\beta)g(y)w+f'(0)w\geq f(w) \\
w(-a,.)=k\psi_\beta\geq V, \quad w(a,.)=0.
\end{cases}
$$
so it is indeed a super-solution of \eqref{FPB}. Moreover
$$
z(x)=\frac{e^{\rho_+(x-a)}-e^{\rho_-(x-a)}}{e^{-\rho_+2a}-e^{-\rho_-2a}} \geq 0
$$
where $\rho_-<\rho_+<0$ are the roots of $\rho^2+c\rho-\lambda_\beta=0$,
i.e. $\rho_\pm = \frac{-c\pm\sqrt{c^2+4\lambda_\beta}}{2}$ (note that $c^2+4\lambda_\beta\geq 0$). Hence
\begin{eqnarray*}
0 <
u_a^c(0,0)&<&w(0,0)=\frac{e^{-\rho_+a}-e^{-\rho_-a}}{e^{-\rho_+2a}-e^{-\rho_-2a}}k\psi_\beta(0)
\\
&=&\frac{1}{e^{-\rho_+a}+e^{-\rho_-a}}k\psi_\beta(0)\leq e^{-\frac{c}{2}a}k
\end{eqnarray*}
Thus if $a$ is large enough to have
$e^{-\frac{c}{2}a}<\frac{\theta}{k}$, then for any $c>2\sqrt{-\lambda_\alpha}+\varepsilon$, we get $u_a^c(0,0)<\theta$.
\end{proof}
With the bounds on the speed $c_a$ it is now possible to pass to the limit as $a$ tends to infinity.
\begin{proposition}
\label{ainfty}
There exists a sequence $(a_j)_{j\in \mathbb{N}}$ such that
$a_j \rightarrow +\infty$, $c_{a_j}\rightarrow c^*\in [0, 2\sqrt{-\lambda_\alpha}]$ and
$u_{a_j}^{c_{a_j}}\rightarrow u$ in $\mathrm{C}^2_{loc}(\mathbb{R}^N)$. The limit $u$
is a solution of
\begin{equation}
\label{systainf}
\begin{cases}
-\Delta u -c^* \partial_1 u +\alpha g(y) u = f(u) \quad \text{on } \mathbb{R}^N \\
0 \leq u\leq V, \quad u(0,0)=\theta, \quad \partial_1 u \leq 0.
\end{cases}
\end{equation}
Then $u$ is necessarily a traveling front solution of \eqref{FPc} with
$c=c^*$.
\end{proposition}
\begin{proof}
First for $j\in \mathbb{N}$, fix $\varepsilon_j=\frac{1}{j+1}$ and $a_j\geq
A(\varepsilon_j)$ with $a_j\rightarrow +\infty$.
Since $c_{a_j}\in (0,2\sqrt{-\lambda_\alpha}+\varepsilon_j]$ is bounded,
$u_{a_j}^{c_{a_j}}$ is uniformly bounded in
$\mathrm{C}^{2,\gamma}$ for any $\gamma \in (0,1)$. Hence up to an extraction of a subsequence,
there exist $c^*\in [0,2\sqrt{-\lambda_\alpha}]$ and $u\in \mathrm{C}^2_{loc}$ such that
$c_{a_j}\rightarrow c^*$ and $u_{a_j}^{c_{a_j}}\rightarrow u$. Clearly the function $u$ is
a solution of \eqref{systainf}. Owing to the normalization $u(0,0)=\theta$, $u$ is not a constant, moreover by the maximum
principle $0<u<V$ and $\partial_1 u<0$. Since $u$ is decreasing in
$x_1$, $\displaystyle u_\pm =\lim_{x_1\rightarrow \pm \infty} u(x_1,\cdot)$ are
solutions of \eqref{PA} and $u_-(0)>\theta>0$ and $0 \leq u_+
(0)<\theta$. This implies that $u_-=V$ and $u_+\equiv
0$. Thus $u$ is indeed a traveling front solution of \eqref{FPc}.
\end{proof}
\subsection{Existence of traveling front for $c\geq 2\sqrt{-\lambda_\alpha}$.}
\label{section: exist}
In this section we still assume $0<\alpha <\alpha_0$ and we will
prove the following theorem.
\begin{theorem}
\label{existc*}
There exists a
traveling front of speed $c$ of equation \eqref{FPc} if and only if $c\geq
2\sqrt{-\lambda_\alpha}$.
\end{theorem}
We start with the following proposition.
\begin{proposition}
\label{minc*}
For $c<2\sqrt{-\lambda_\alpha}$ there exists no traveling front solution of
\eqref{FPc}. Thus $c^*=2\sqrt{-\lambda_\alpha}$ (where $c^*$ is the traveling speed constructed in the previous section).
\end{proposition}
\begin{proof}
We argue by contradiction and assume that there exists a traveling front $u$ of
speed $c<2\sqrt{-\lambda_\alpha}$ of \eqref{FPc}.
We are going to construct a
small positive sub-solution with compact support.
To this end, we can find $\delta\in (0, f'(0))$ such that
$c^2+4(\lambda_\alpha+2\delta)<0$ and
$\eta>0$ such that for all $s\in [0,\eta]$, $f(s)\geq (f'(0)-\delta) s$.
Since the linearized operator $L=-\Delta +\alpha g(y)-f'(0)$ is
self adjoint, the principal eigenvalue $\lambda_\alpha$ is the limit of the
Dirichlet principal eigenvalue in $B_R$ when $R\rightarrow \infty$ (see \cite{HBLR06} for more details):
\begin{equation}
\label{vpR}
\begin{cases}
-\Delta \psi^R+\alpha g(y) \psi^R-f'(0)\psi^R=\lambda_\alpha^R \psi^R, \quad y\in
B_R, \\
\psi^R>0 \quad \text{on } B_R, \quad \psi^R=0 \quad \text{on } \partial B_R.
\end{cases}
\end{equation}
Precisely $\lambda_\alpha^R >\lambda_\alpha$ and $\lambda_\alpha^R \xrightarrow{R\rightarrow \infty}
\lambda_\alpha$. In the following, $\psi^R$ denotes the positive eigenfunction with
$\|\psi^R\|_\infty=1$ and let us fix $R$ sufficiently large so that
$\lambda_\alpha <\lambda_\alpha^R<\lambda_\alpha+\delta$.
Let $\tilde{\sigma}=\sigma+i \frac{\pi}{2L}$, $L>0$, be an imaginary root of
$
X^2+c X -\lambda_\alpha^R -\delta =0
$
which is possible since $c^2 +4(\lambda_\alpha^R+\delta) <c^2 +4(\lambda_\alpha +
2\delta)<0$. Finally let us fix $\varepsilon>0$
small enough such that $\varepsilon e^{\sigma x_1}<\eta$ and $\varepsilon e^{\sigma
x_1} \psi^R(y)<u(x_1,y)$ for $x_1\in [-L,L]$ and $y\in \overline{B}_R$.
We set
\begin{equation}
\label{constrss}
w(x_1,y)= \begin{cases}
\varepsilon e^{\sigma x_1} \cos(\frac{\pi}{2L}x_1) \psi^R(y) \text{ if }
-L<x_1<L, \; y\in B_R, \\
0 \text{ otherwise.}
\end{cases}
\end{equation}
Then $w$ verifies
$$
-\Delta w - c\partial_1 w+\alpha g(y) w=(f'(0)-\delta)w \leq f(w)
$$
since $0 \leq w \leq \eta$. Moreover $w\leq
u$ and $w>0$ on $(-L,L)\times B_R$. Thus $w$ is a generalized sub-solution
with compact support \cite{BL80}.
Let us now derive a contradiction with the existence of a traveling
front $u$. Translate $u$ to the left by defining
$u_\tau(x_1,y)=u(x_1+\tau,y)$ for $\tau>0$. Since $u(x_1,.)
\xrightarrow{x_1\rightarrow +\infty} 0$, there exists $\tau^*\geq
0$ such that $u_{\tau^*}\geq w$ but
$u_{\tau^*}(x_1^*,y^*)=w(x_1^*,y^*)$. Since $u_{\tau^*}>0$, $x_1^*\in (-L,L)$
and $y^*\in B_R$ (an interior point of the support of $w$). Now since
$w$ is a sub-solution, the strong maximum principle yields
$u_{\tau^*}\equiv w$ on $[-L,L]\times \overline{B_R}$, but this is
impossible on the boundary.
\end{proof}
We have already proved that for $c<2\sqrt{-\lambda_\alpha}$, there exists no traveling front of speed
$c$ solution of \eqref{FPc} and that for $c=c^*=2\sqrt{-\lambda_\alpha}$ there
exists a traveling front of speed $c$. Let us prove that for any $c>c^*$
there exists at least one traveling front, which will conclude the proof of Theorem \ref{existc*}. The proof goes as usual. We consider
the following problem
\begin{equation}
\begin{cases}
-\Delta u - c\partial_1 u + \alpha g(y) u=f(u), \quad x=(x_1,y)\in
(-a,a)\times \mathbb{R}^{N-1} \\
u(-a,\cdot)=u^*(-a+r,\cdot), \quad u(a,\cdot)= u^*(a+r,\cdot)
\end{cases}
\label{FPar}
\end{equation}
where $u^*$ is the traveling front of speed $c^*$.
The function $u^*(\cdot+r,\cdot)$ is a strict super-solution of \eqref{FPar}
(since $c>c^*$), while 0 is a strict sub-solution and $0<u^*(\cdot+r,\cdot)$. Hence as in
theorem \ref{boite}, it can be proved that there exists a unique
solution $v_a^r$ of \eqref{FPar} and moreover $\partial_1 v_a^r<0$ and
$$\forall (x_1,y)\in [-a,a]\times \mathbb{R}^{N-1} \quad V(y)>u^*(-a+r,y)\geq
v_a^r(x_1,y)\geq u^*(a+r,y)>0.$$
By uniqueness, $v_a^r$ depends continuously on $r\in \mathbb{R}$, so
$v_a^r \xrightarrow{r\rightarrow +\infty}0$ and
$v_a^r \xrightarrow{r\rightarrow -\infty} V$ uniformly on
$[-a,a]\times \mathbb{R}^{N-1}$. Let us denote $u_a=v_a^r$ where $r$ is chosen
in order that $v_a^r(0,0)=\theta$ (see previous section for definition
of $\theta$). Once again taking any sequence $a_n\rightarrow +\infty$,
up to an extraction $u_{a_n} \rightarrow u$ in $\mathrm{C}^2_{loc}$ and $u$ is
a traveling front of speed $c$ solution of \eqref{FPc}.
\section{The case of a KPP non-linearity. Asymptotic speed of spreading.}
\label{section: convergence}
This section is concerned with the asymptotic behavior of the
solutions of the parabolic problem
\begin{equation}
\label{parabolique}
\begin{cases}
\partial_t u -\Delta u =f(u) -\alpha g(y) u \quad
&\text{on } \mathbb{R}\times \mathbb{R}^N\\
u(0,x)=u_0(x) & \text{on } \mathbb{R}^N
\end{cases}
\end{equation}
where $f$ is KPP and $u_0$ is a bounded initial condition.
\subsection{Extinction for $\alpha\geq \alpha_0$}
Let us fix $\alpha \geq \alpha_0$. We recall that there is no positive
asymptotic profile of \eqref{PA}.
\begin{theorem}
For $u_0\in L^\infty$, there exists a unique solution $u(t,x)$ of
\eqref{parabolique} and it verifies $u(t,x) \xrightarrow{t\rightarrow
+\infty} 0$ uniformly for $x\in \mathbb{R}^N$.
\end{theorem}
This section is devoted to the proof of this theorem.
Let us fix $S=\max(1,\|u_0\|_{\infty})$. Then the constant functions
$0$ and $S$ are respectively sub- and super-solutions of
\eqref{parabolique}. Thus there exists $u(t,x)$ a solution of
\eqref{parabolique} such that $0\leq u\leq S$. By the parabolic
maximum principle, this solution is unique.
Let us define $w$ the solution of \eqref{parabolique} with the
initial condition $w(0,x)=S$. Since the problem and the initial
condition do not depend on $x_1$, neither does $w$ thus we will write
$w(t,y)$. By the
maximum principle, $0\leq u\leq w\leq S$ and since $S$ is a
super-solution, $\partial_t w\leq 0$. Thus
$w(t,y)\xrightarrow{t\rightarrow +\infty} W(y)$ and
$$
0\leq \limsup_{t\rightarrow +\infty} u \leq W\leq S.
$$
Now by parabolic local estimates, $W$ is necessarily solution of
$$
-\Delta_y W=f(W)-\alpha g(y) W
$$
and thus is a nonnegative asymptotic profile. Since $\alpha \geq
\alpha_0$, $W\equiv 0$. So $u(t,x)$ tends to 0 for $t\rightarrow +\infty$
uniformly in $\mathbb{R}^{N}$.
\subsection{Spreading for $\alpha < \alpha_0$}
In this section we assume $\alpha<\alpha_0$. So there exists a critical
speed $c^*$ of existence of traveling front for \eqref{FPc}. We assume
that $u_0\in \mathrm{C}^0_0(\mathbb{R}^N)$, i.e. $u_0$ is continuous and compactly
supported, and that $u_0<V$ where $V$ is the positive asymptotic
profile solution of \eqref{PA}. We will prove the spreading
of the solution of \eqref{parabolique} but we first need the following theorem.
\begin{theorem}
\label{existz}
The unique solution of
\begin{equation}
\label{uniq}
\begin{cases}
-\Delta z -c \partial_1 z+\alpha g(y) z=f(z) \quad (x_1,y)\in \mathbb{R}^N,\\
0< z(x_1,y) \leq V(y) \quad (x_1,y)\in \mathbb{R}^N.
\end{cases}
\end{equation}
with $c<c^*$ is $z(x_1,y) \equiv V(y)$.
\end{theorem}
\begin{proof}
Let us consider the generalized sub-solution with compact support $w(x_1,y)$ defined in \eqref{constrss}. This is possible since $c<c^*$. Up to a decrease of $\varepsilon>0$, we can assume that
$w\leq z$ on $\mathbb{R}^N$. Now by applying the sliding method to $w^\tau$ where $w^\tau(x_1,y)=w(x_1+\tau,y)$ and $z$, one can prove that $w^\tau \leq z$ for all $\tau \in \mathbb{R}$. We can thus define
$$
\forall y\in \mathbb{R}^{N-1} \quad \underline{z}(y)=\inf_{x_1\in \mathbb{R}} z(x_1,y) \geq 0
$$
and state that $\underline{z}\not\equiv 0$.
Now $\underline{z}$ is a super-solution of \eqref{PA} since $\underline{z}=\inf_{h\in \mathbb{R}} z(\cdot+h,\cdot)$ and an infimum of solutions is a super-solution.
Finally as in section \ref{section: asympt}, we can build a positive sub-solution of \eqref{PA} smaller than $\underline{z}$ and thus by monotone iteration we obtain a solution of \eqref{PA} between these sub- and super-solutions. By uniqueness of the positive solution, we obtain $V\leq z$. Due to the condition $z\leq V$ in \eqref{uniq}, we have $z\equiv V$.
\end{proof}
Let us now turn to the precise study of the spreading of the solution of \eqref{parabolique}
\begin{theorem}
\label{thmcv}
For $u_0\in \mathrm{C}^0_0(\mathbb{R}^N)$ with $u_0< V$, there exists a unique
solution $u$ of \eqref{parabolique} and
\begin{eqnarray}
\label{cgrd}
\text{for any }c>c^* \quad \lim_{t\rightarrow +\infty} \sup_{|x_1|\geq ct}
u(t,x)=0, \\
\label{cpt}
\text{for any } c \text{ with } 0\leq c <c^* \quad \lim_{t\rightarrow +\infty} \sup_{|x_1| < ct} |u(t,x)-V(y)|=0.
\end{eqnarray}
\end{theorem}
\begin{proof}
Fix $c>c^*$. Let $U$ denote a traveling front of speed $c^*$. Since
$U(x_1,\cdot)\rightarrow V$ for $x_1\rightarrow -\infty$ locally
uniformly, there exists $L\in \mathbb{R}$ such that $U(x_1-L,y) >u_0(x_1,y)$
for all $(x_1,y)\in \mathbb{R}^N$. Now considering $v(t,x)=U(x_1-L-c^*t,y)$
and applying the comparison principle, we have
$u(t,x)\leq v(t,x)$ for all $t\geq 0$ and $x\in \mathbb{R}^N$.
Thus since $U$ is decreasing in $x_1$
$$
\sup_{|x_1|>ct} u(t,x)\leq \sup_{|x_1|>ct} U(x_1-L-c^* t,y) = \sup_{y\in \mathbb{R}^{N-1}} U((c-c^*)t-L,y).
$$
Since $c>c^*$ and $U(x_1,y)\xrightarrow{x_1\rightarrow +\infty} 0$ uniformly in $y\in \mathbb{R}^{N-1}$, we see that $\sup_{x_1\geq ct} u(t,x) \rightarrow 0$ as $t\rightarrow +\infty$. Since $u(t,-x_1,y)$ satisfies the same equation \eqref{parabolique}, this shows that $\sup_{x_1\leq -ct} u(t,x) \rightarrow 0$ as $t\rightarrow +\infty$ as well. Thus \eqref{cgrd}
is proved.
Assume now $c<c^*$. Let us first prove the following weakened version of \eqref{cpt}:
\begin{lemma}
\label{cvp}
For any $c\in \mathbb{R}$ with $|c|\leq c^*$,
\begin{equation}
\label{cptw}
\forall (x_1,y)\in \mathbb{R}^N \quad \lim_{t\rightarrow +\infty} |u(t,x_1-ct,y)-V(y)| =0.
\end{equation}
\end{lemma}
\begin{proof}[Proof of lemma \ref{cvp}]
Let us assume that $c\geq 0$, the proof being similar for $c\leq 0$.
Let $v(t,x_1,y)=u(t,x_1-ct,y)$. Then $v$ satisfies the equation
\begin{equation}
\label{pardec}
\partial_t v-\Delta v -c \partial_1 v+\alpha g(y)v=f(v)
\end{equation}
with the initial datum $v(0,x_1,y)=u_0(x_1,y) \geq 0$ and $\not \equiv
0$. Hence by the parabolic maximum principle, for all $(x_1,y)\in \mathbb{R}^N$
$v(1,x_1,y)>0$. Now since $c< c^*$, in \eqref{constrss}, we constructed $w(x_1,y)\geq 0$ a stationary non-zero
sub-solution of \eqref{pardec} with compact support and $w$ could be
chosen arbitrarily small. Hence we can assume $w\leq v(1,\cdot,\cdot)$. So if $\tilde{w}$
is the solution of
$$
\begin{cases}
\partial_t \tilde{w} - \Delta \tilde{w} -c \partial_1 \tilde{w} +\alpha g(y) \tilde{w}=f(\tilde{w})
\quad t>0, \; (x_1,y)\in \mathbb{R}^N\\
\tilde{w}(0,x_1,y)=w(x_1,y) \quad (x_1,y)\in \mathbb{R}^N
\end{cases}
$$
then by comparison principle, $\forall t\geq 1$ $\forall (x_1,y)\in
\mathbb{R}^N$ $v(t,x_1,y)\geq \tilde{w}(t-1,x_1,y)$. Now since $w$ is a sub-solution,
$\tilde{w}$ is increasing with respect to $t$ and $0\leq \tilde{w}(t,x_1,y) \leq
V(y)$. Therefore, by standard elliptic estimates,
$\tilde{w}(t,x_1,y) \xrightarrow{t\rightarrow +\infty} z(x_1,y)$ and $z$ is a
solution of \eqref{uniq}. By theorem \ref{existz}, we have $z\equiv V$ and
this completes the proof of the lemma since by the
comparison principle $\tilde{w}(t-1,x_1,y)\leq v(t,x_1,y)\leq V(y)$ thus
$$
\lim_{t\rightarrow +\infty} v(t,x_1,y)=V(y)
$$
which yields \eqref{cptw}.
\end{proof}
Let us now prove \eqref{cpt}, that is the uniform convergence to $V$ in the expanding slab $\{x_1 \leq ct\}$. We will only prove it for $0\leq x_1\leq ct$. Indeed using as before $u(t,-x_1,y)$, the general result follows from the convergence in the set $\{0\leq x_1\leq ct\}$.
Let $c$ with $0<c<c^*$ be fixed and let $\varepsilon>0$ be given (arbitrarily small). For $R>0$ sufficiently large, we know that the principal eigenvalue $\lambda_\alpha^R$ of the problem \eqref{vpR} above is such that $\lambda_\alpha^R<0$. Denote by $\psi^R>0$ the corresponding eigenfunction of \eqref{vpR}. Under these conditions we know that there exists a unique solution $V^R(y)>0$ of the profile equation in $B_R$ with Dirichlet condition:
\begin{equation}
\label{PAR}
\begin{cases}
-\Delta V^R+\alpha g(y) V^R=f(V^R) \qquad \text{in } B_R \\
V^R=0 \quad \text{on } \partial B_R, \qquad V^R>0 \quad \text{in } B_R.
\end{cases}
\end{equation}
(Compare e.g. \cite{HB81}). Moreover, it is straightforward to show that $V^R$ is increasing with $R$ and that $\lim_{R\rightarrow +\infty} V^R(y)=V(y)$.
Let us choose $R>0$ sufficiently large so that
for all $y\not\in B_R$ $V(y)<\varepsilon $ and for all $y\in \overline{B_R}$ $0<V(y)-V^R(y)<\varepsilon$. The proof of the uniform convergence to $V$ for $c<c^*$ will rest on the following Proposition.
\begin{proposition}
\label{existv}
Let $c$ be such that $0<c<c^*$. Then, with $R$ chosen as above, there exists a solution $v_c(x_1,y)$ defined for $x_1\in \mathbb{R}^-$, $y\in \overline{B_R}$ of equation
\begin{equation}
\label{Pv}
-\Delta v-c\partial_1 v +\alpha g(y)v=f(v) \quad x_1\leq 0, \; y\in \overline{B_R}
\end{equation}
satisfying the following properties:
$$
\begin{cases}
v_c>0 \text{ and } \partial_1 v_c<0 \text{ in } \mathbb{R}_*^- \times B_R, \\
v_c(0,y)=0 \text{ for } y\in \overline{B_R}, \\
v_c(x_1,y)=0 \text{ for } y\in \partial B_R, \; x_1\leq 0, \\
v_c(-\infty,y)=V^R(y) \text{ for } y\in \overline{B_R}.
\end{cases}
$$
\end{proposition}
Postponing the proof of this proposition, let us complete the proof of Theorem \ref{thmcv}. Extending $v_c$ by $0$ for $x_1\geq 0$ turns $v_c$ into a (generalized) sub-solution of equation \eqref{Pv} in the cylinder $\mathbb{R} \times B_R$ (see \cite{BL80}).
Therefore $v_c(x_1-c(t-t_0),y)$ is a sub-solution of the equation \eqref{parabolique} in this cylinder for all $t_0 \geq 0$ and all $c\in (0,c^*)$.
By Lemma \ref{cvp} (applied here in the case $c=0$), we can fix $t_0>0$ sufficiently large such that for $t\geq t_0$ we have
$$
u(t,0,y)\geq V(y)-\frac{\delta}{2} \quad \text{for all } y\in \overline{B_R}
$$
where $\displaystyle \delta =\min_{\overline{B_R}} (V-V^R)>0$. Therefore,
$$
u(t,0,y)>V^R(y) \quad \text{for all } t\geq t_0 \text{ and all } y\in \overline{B_R}.
$$
We fix $\tilde{c} \in (c,c^*)$ and we consider $v(t,x_1,y)=v_{\tilde{c}}(x_1-\tilde{c}(t-t_0),y)$.
In the region $D=(0,+\infty)\times B_R$, $u$ is a solution and $v$ a sub-solution of equation \eqref{parabolique} and for any time $t\geq t_0$
$$
u(t,x_1,y) \geq v (t,x_1,y) \quad \text{for } (x_1,y)\in \partial D.
$$
Moreover, $u(t_0,x_1,y)\geq v(t_0,x_1,y)=0$ in $D$. The comparison principle then yields
$$
u(t,x_1,y)\geq v(t,x_1,y) \quad \text{in } D.
$$
Therefore
\begin{eqnarray*}
\limsup_{t\rightarrow +\infty} \sup_{0\leq x_1\leq ct \atop y\in B_R} \big(V(y)-u(t,x)\big) &\leq& \limsup_{t\rightarrow +\infty} \sup_{0\leq x_1\leq ct \atop y\in B_R} \big(V(y)- v_{\tilde{c}}(x_1-\tilde{c}(t-t_0),y) \big) \\
&\leq& \limsup_{t\rightarrow +\infty} \sup_{y\in B_R} \big( V(y)- v_{\tilde{c}}((c-\tilde{c})t+\tilde{c}t_0,y) \big) \\
&\leq& \sup_{y\in B_R} \big( V(y) -V^R(y)\big)<\varepsilon.
\end{eqnarray*}
Outside of $B_R$ we already know that $0<u<V<\varepsilon$ for any $t\geq 0$, $x_1 \in \mathbb{R}$ and $|y|\geq R$. Therefore
$$
\limsup_{t\rightarrow +\infty} \sup_{0\leq x_1\leq ct} \big(V(y)-u(t,x)\big) \leq \varepsilon.
$$
Since this is true for all $\varepsilon>0$ (and, by the symmetry argument above, also for $-ct\leq x_1\leq 0$), we have thereby established \eqref{cpt}.
It now remains to prove Proposition \ref{existv}, which we carry out now.
As in \eqref{constrss}, we construct a sub-solution of the equation \eqref{Pv} with compact support, namely:
$$
w(x_1,y)= \begin{cases}
\varepsilon e^{\sigma x_1} \cos(\frac{\pi}{2L}x_1+\frac{\pi}{2}) \psi^R(y) \text{ if }
-2L<x_1<0, \; y\in B_R, \\
0 \text{ otherwise.}
\end{cases}
$$
In comparison with \eqref{constrss}, there is a translation in $x_1$ such that the support of $w$ now lies in $\mathbb{R}_- \times B_R$.
For any $b<0$, let $z_b$ be the solution of
$$
\begin{cases}
-\Delta z_b-c\partial_1 z_b +\alpha g(y)z_b=f(z_b) \quad \text{in } (b,0)\times B_R, \\
z_b(b,y)=V^R(y), \quad z_b(0,y)=0 \quad \text{for } y\in \overline{B_R} \\
z_b(x_1,y)=0 \quad \text{for } x_1\in (b,0), \; |y|=R.
\end{cases}
$$
Since $V^R$ is a super-solution and $0$ a sub-solution, there exists a solution of this problem. By the sliding method of \cite{BN91}, we know that this solution is unique and satisfies $\partial_1 z_b<0$ in $(b,0) \times B_R$.
Next, for $b<-L$, since $w$ is a sub-solution, we also know that
$$
\forall b\leq -L \quad \forall (x_1,y)\in (b,0) \times \overline{B_R} \quad z_b(x_1,y)>w(x_1,y).
$$
This allows us to pass to the limit when $b\rightarrow -\infty$. Clearly $z_b(x_1,y) \xrightarrow{b\rightarrow -\infty} v_c(x_1,y)$. By the lower bound, $v_c(x_1,y)>w(x_1,y)$ which shows that $v_c(x_1,y)>0$ in $\mathbb{R}_-^*\times B_R$. Since $\partial_1 v_c \leq 0$ and $v_c\not\equiv 0$, we also know that $\partial_1 v_c < 0$ in $ \mathbb{R}_-^*\times B_R$. Now $\lim_{x_1\rightarrow -\infty} v_c(x_1,y)$ must be a positive solution of \eqref{PAR}. Hence by uniqueness we get $v_c(-\infty,y)=V^R(y)$. This completes the proof of Proposition \ref{existv} and therefore of Theorem \ref{thmcv}.
\end{proof}
\section{The case of a bistable non-linearity. Asymptotic profiles.}
\label{section: APbist}
In this section we consider again equation \eqref{main} but in the bistable framework. That is, we assume that $f$ is a $\mathrm{C}^1$ function that satisfies the following assumptions for some $\theta\in(0,1)$:
\begin{equation}
\label{fbistable}
f(0)=f(\theta)=f(1)=0, \quad f(s)<0 \text{ for } s\in (0,\theta) \text{ and } f(s)>0 \text{ in } (\theta,1),
\end{equation}
\begin{equation}
\label{derivf}
f'(0)<0, \; f'(1)<0.
\end{equation}
We also assume that
\begin{equation}
\label{fcpos}
\int_0^1 f(s)ds>0.
\end{equation}
We are concerned here with the existence of traveling front solutions of \eqref{main}, that is, $(c,u)$ solution of \eqref{FPc}. First we require some preliminary results on the equation \eqref{PA} in the bistable case.
\subsection{Existence of asymptotic profiles in the bistable case}
Consider equation
\begin{equation}
\label{PAb}
\begin{cases}
\Delta u +f(u)-\alpha g(y)u=0, \quad y\in \mathbb{R}^{N-1}, \\
u\geq 0, \quad u \text{ bounded},
\end{cases}
\end{equation}
under the same assumption \eqref{Hypgpos} and \eqref{Hypginf} as above for the function $g$.
The existence of solutions depends on $\alpha$ and is obtained in the following theorem.
\begin{theorem}
\label{thmPAb}
Let $f$ and $g$ satisfy the above assumptions. There exists a threshold value $\alpha^*\in (0,\infty)$, such that:
\begin{description}
\item[i)] For any $\alpha \in ( \alpha^*,+\infty)$, \eqref{PAb} does not have any positive (non-zero) solution.
\item[ii)] For any $\alpha \in (0, \alpha^*]$, \eqref{PAb} admits a maximal positive solution $V(y)$.
\item[iii)] For any $\alpha \in (0, \alpha^*)$, \eqref{PAb} admits a second positive solution $W(y)$ with $0<W(y)<V(y)$.
\end{description}
\end{theorem}
The rest of this section is devoted to the proof of this Theorem.
This Theorem follows from the observation that for $\alpha>0$ any positive solution $u(y)$ of \eqref{PAb} satisfies $u(y)\rightarrow 0$ as $|y|\rightarrow \infty$. This is obtained from Corollary \ref{decry}.
Next, by the maximum principle, any solution of \eqref{PAb} satisfies $0\leq u\leq 1$ (we think of $f(s)$ as having been extended by 0 outside $[0,1]$).
Now $\overline{u}\equiv 1$ is a super-solution of problem \eqref{PAb}. Any solution of \eqref{PAb} for $\alpha$ is a sub-solution of \eqref{PAb} for any parameter $\beta\leq \alpha$. Therefore, if there exists a positive bounded solution of \eqref{PAb} for $\alpha$, there also exists a positive solution for any $0<\beta\leq \alpha$.
Next, we claim that for small enough $\alpha>0$, \eqref{PAb} admits a positive solution. Indeed, consider the functional defined on $\mathcal{H}$:
$$
J(w)=J_\alpha (w)=\int_{\mathbb{R}^{N-1}} \left( \frac{1}{2} |\nabla w|^2 +\frac{\alpha}{2} g(y) w^2 -F(w) \right) dy
$$
where $F(z)=\int_0^z f(s)ds$. Recall that $f$ is extended by 0 outside $[0,1]$, thus $F$ is bounded. Since $g(r)\rightarrow \infty$ as $r\rightarrow \infty$, it is straightforward to show that there exists a minimizer $v$ of $J(w)$:
$J(v)=\min\{ J(w), \, w\in \mathcal{H} \}$. Furthermore, we know that $v\geq 0$ and $v$ is a solution of \eqref{PAb} (see Theorem \ref{energy} for details).
Let us show that for $\alpha>0$ small enough $J(v)<0$. To this end, let $\zeta_R$ be defined by
$$
\zeta_R(y)=
\begin{cases}
1 &\text{if } |y|\leq R \\
R+1 - |y| &\text{if } R \leq |y| \leq R+1 \\
0 & \text{if } |y|\geq R+1
\end{cases}
$$
Then $\zeta_R \in \mathcal{H}$ and
$$
J_0(\zeta_R)=\int_{\mathbb{R}^{N-1}} \frac{|\nabla \zeta_R|^2}{2} -F(\zeta_R) \leq -F(1) |B_R|+C|B_{R+1} \setminus B_R|
$$
where $|A|$ denotes the volume of $A$ and $C$ is a constant.
Since $-F(1)<0$ by \eqref{fcpos}, we see that by choosing $R$ large enough, $J_0(\zeta_R)<0$. Then for such an $R$ fixed, we see that $J_\alpha(\zeta_R)<0$ provided $\alpha >0$ is small enough. This guarantees that $J_\alpha(v)<0$.
It follows that $v\not\equiv 0$. By the maximum principle, we then have $0<v<1$. This shows that for small $\alpha>0$, \eqref{PAb} admits a positive solution.
\medskip
Next, we show that if $\alpha$ is large enough \eqref{PAb} does not admit any positive solution. This can be seen by multiplying the equation by $u$ and integrating to yield:
\begin{equation}
\label{PAint}
\int |\nabla u|^2+\alpha \int g(y) u^2 = \int f(u) u \leq m\int u^2
\end{equation}
where $\displaystyle m=\sup_{s>0} \frac{f(s)}{s}>0$. We conclude with the following lemma:
\begin{lemma}
\label{CSH}
Under the assumption \eqref{Hypginf} $g(r)\xrightarrow{r\rightarrow \infty} \infty$, for any $\varepsilon>0$, there exists a constant $K(\varepsilon)>0$ such that for all $u\in \mathcal{H}$ one has:
$$
\int_{\mathbb{R}^{N-1}} u^2 \leq \varepsilon \int_{\mathbb{R}^{N-1}}|\nabla u|^2 +K(\varepsilon) \int_{\mathbb{R}^{N-1}} g(y) u^2 .
$$
\end{lemma}
Indeed choosing in the lemma $\varepsilon=\frac{1}{2m}$, we get from \eqref{PAint}
$$
\frac{1}{2} \int |\nabla u|^2+\left( \alpha -m K(\frac{1}{2m})\right) \int g(y) u^2 \leq 0
$$
This shows that for $\alpha \geq mK(\frac{1}{2m})$, the only solution of \eqref{PAb} is $u\equiv 0$.
\begin{proof}[Proof of Lemma \ref{CSH}]
Let $\delta=\delta(\varepsilon)>0$ be chosen such that the principal eigenvalue of $-\Delta$ in $H ^1_0(B_{2\delta})$ is larger than $\frac{4}{\varepsilon}$. Let $\chi$ be a smooth cutoff function such that $\chi(r)=1$ if $0\leq r\leq \delta$, $\chi(r)=0$ if $r\geq 2\delta$ and $0\leq \chi \leq 1$. Consider $u_1=\chi u$ and $u_2=(1-\chi)u$ so that $u=u_1+u_2$. Using $(a+b)^2 \leq 2\left( a ^2 +b ^2\right)$, since $u_1\in H^1_0(B_{2\delta})$ by Poincar\'e's inequality, we have
$$
\int_{R^{N-1}} {u_1}^2=\int _{B_{2\delta}} {u_1}^2\leq \frac{\varepsilon}{4} \int _{B_{2\delta}} |\nabla u_1|^2 \leq \frac{\varepsilon}{2} \left( \int _{B_{2\delta}} |\nabla u|^2 \chi ^2 + \int _{B_{2\delta}\setminus B_\delta} u^2 |\nabla \chi|^2 \right).
$$
So that
$$
\int_{R^{N-1}} {u_1}^2 \leq \frac{\varepsilon}{2} \int _{B_{2\delta}} |\nabla u|^2 +\varepsilon k_1(\varepsilon) \int_{|y|\geq \delta } u^2
$$
where $k_1(\varepsilon)\geq |\nabla \chi|^2$.
Next $\displaystyle \int _{R^{N-1}} {u_2}^2 \leq \int_{|y|\geq \delta } u^2$.
Therefore
$$
\int _{R^{N-1}} {u}^2 \leq 2\left( \int {u_1}^2+\int {u_2}^2 \right) \leq \varepsilon \int |\nabla u|^2+ K(\varepsilon) \int g(y)u^2
$$
where $K(\varepsilon)=2\frac{\varepsilon k_1(\varepsilon)+1}{g(\delta)}$. The lemma is thus proved.
\end{proof}
The next step is to prove that the set of $\alpha >0$ such that \eqref{PAb} has a solution is a closed set. Let $\alpha_j \rightarrow \alpha^*$ be a sequence such that \eqref{PAb} admits a solution $u_j$ such that $0<u_j<1$ for all $j$. Note that by the maximum principle, $\theta <\max u_j <1$. The sequence $(u_j)$ is bounded by 1 and by standard elliptic estimates is locally compact. Therefore, one can extract a subsequence $u_j$ such that $u_j\rightarrow u^*$ uniformly on compact sets in the $\mathrm{C}^2$-norm. Therefore $u^*$ is a solution of \eqref{PAb} for the value $\alpha=\alpha^*$. We know that $u^*\geq 0$, but since $\max u_j >\theta$, we see that $\max u^*\geq \theta$. Indeed by section \ref{section: prelim}, $u_j(y)\rightarrow 0$ as $|y| \rightarrow \infty$ uniformly with respect to $j$. Therefore $u^*>0$ and \eqref{PAb} also has a positive solution for $\alpha^*$. This shows that the set of $\alpha$ such that \eqref{PAb} has a positive solution is an interval $(0,\alpha^*]$ with $0<\alpha^*<\infty$.
Considering the evolution equation
$$
\begin{cases}
\partial_t z-\Delta z +\alpha g(y) z=f(z), \quad t>0, \; y\in \mathbb{R}^{N-1}, \\
z(0,y)=1,
\end{cases}
$$
we see that $t\mapsto z(t,y)\geq 0$ is decreasing and therefore has a limit. This limit is necessarily the maximal positive solution $V=V_\alpha$ for the $\alpha$ for which \eqref{PAb} has a positive solution, that is $\alpha \in (0,\alpha^*]$, or is 0 in the opposite case, that is when $\alpha >\alpha^*$.
The existence of a second solution when $0<\alpha<\alpha^*$ is inspired from a work of P. Rabinowitz \cite{PR1}.
In a slightly different formulation, the existence of pairs of solutions is established in \cite{PR1} by a topological degree argument for bistable type nonlinearities and another type of parameter dependence. The use of the topological degree involves compact operators and the results of \cite{PR1} are set in the framework of bounded domains. A similar construction can be carried out here owing to the condition \eqref{Hypginf} $g(r)\rightarrow +\infty$ as $r\rightarrow +\infty$. Indeed, under this condition, the injection $\mathcal{H} \hookrightarrow L^2(\mathbb{R}^{N-1})$ is compact. This allows one to construct a compact operator and to carry the argument of \cite{PR1} over to the present framework.
Since we will not use the second solution, we will leave out the details of the proof of the existence of a second solution.
\subsection{Stable asymptotic profiles}
As we have seen, a solution of \eqref{PAb} is obtained by the minimization of $J=J_\alpha$ defined above. The proof of the existence of the previous solution for $\alpha>0$ small yields the following result.
\begin{proposition}
\label{PAminJ}
There exists $0<\alpha_*\leq \alpha^*$ such that for all $\alpha \in (0,\alpha_*)$ there exists a minimum $v_\alpha>0$ of $J_\alpha$ and such that
$$
J_\alpha(v_\alpha)=\min_{H^1(\mathbb{R}^{N-1})} J_\alpha <0.
$$
\end{proposition}
In the following, we require the notion of stable solution.
\begin{definition}
\label{defvp}
Let $v$ be a solution of \eqref{PAb}. Eigenvalues of the linearized problem about $v$ are defined as the eigenvalues $\lambda$ of
$$
-\Delta \varphi +\alpha g(y) \varphi -f'(v)\varphi = \lambda \varphi \quad \text{in } \mathbb{R}^{N-1}.
$$
The principal eigenvalue is uniquely determined by the existence of a corresponding eigenfunction $\varphi$ with $\varphi>0$.
We say that $v$ is (weakly) stable if the principal eigenvalue $\lambda=\lambda_1[v]$ of the linearized problem satisfies $\lambda_1[v]\geq 0$.
\end{definition}
It is well known that the maximal solution $V(y)$ given by Theorem \ref{thmPAb} when $0<\alpha\leq \alpha^*$ is weakly stable. Likewise, the minimum solution of the energy of the Proposition \ref{PAminJ} above, when $0<\alpha<\alpha_*$, is a weakly stable solution.
In the following we consider the case $0<\alpha<\alpha_*$ and we make the following assumption.
\begin{equation}
\label{uniqsPA}
\text{There exists a unique positive stable solution of \eqref{PA}.}
\end{equation}
This condition implies that the minimizer solution $v_\alpha$: $J_\alpha(v_\alpha)=\min\{J_\alpha(v), \; v\in \mathcal{H} \}$ coincides with the maximum solution $V$.
We leave it as an open problem to give sufficient conditions for the uniqueness of the stable solution. Uniqueness results have been given for analogous problems but with $\alpha=0$, which would rather correspond to the minimal solution in our framework \cite{PS}.
Likewise it would be interesting to give sufficient conditions that ensure that $\alpha_*=\alpha^*$.
Condition \eqref{uniqsPA} has several implications that we can state.
\begin{proposition}
For $\alpha \in (0,\alpha_*]$, under condition \eqref{uniqsPA}, there does not exist a pair of distinct ordered functions $(v_1,v_2)$ with $0<v_1\leq v_2 < V$, $v_1$ is a sub-solution and $v_2$ is a non-maximal solution. That is, if $0<v_1\leq v_2<V$ are respectively sub-solution and solution of \eqref{PAb}, then $v_1\equiv v_2$.
\end{proposition}
\begin{proof}
The proof follows the observation in \cite{BN91}. However, it requires new elements in view of the unbounded domain. If $v_1<v_2$, let $\varphi_2$ be a principal eigenfunction of the linearized problem corresponding to $\lambda_1[v_2]$. Since $0$ and $V$ are the only stable solutions, $\lambda_1[v_2]<0$. We claim that for $\varepsilon >0$ sufficiently small, $\overline{v}=v_2-\varepsilon \varphi_2$ is a super-solution of \eqref{PAb}. Indeed
\begin{eqnarray*}
-\Delta \overline{v} +\alpha g(y) \overline{v} -f(\overline{v})&=&f(v_2)-f(\overline{v})-f'(v_2)\varepsilon \varphi_2 -\lambda_1[v_2]\varepsilon \varphi_2\\
&=& \left( \frac{f(v_2)-f(v_2-\varepsilon \varphi_2) }{\varepsilon \varphi_2} -f'(v_2)-\lambda_1[v_2] \right) \varepsilon \varphi_2.
\end{eqnarray*}
The right hand side is positive if $\varepsilon>0$ is sufficiently small.
Next, given $R>0$, we can choose $\varepsilon>0$ small enough so that $v_1<v_2-\varepsilon \varphi_2 $ in $\overline{B_R}$. We choose $R$ so that $v_1(y) \leq \delta$ for all $|y|\geq R$ and $f$ is decreasing on $[0,\delta]$. We claim that then $v_1\leq v_2-\varepsilon \varphi_2$ in $\mathbb{R}^{N-1}\setminus \overline{B_R}$. Argue by contradiction. If this were not the case, then, since $v_1$, $v_2$ and $\varphi_2$ converge to 0 at infinity, there exists $y$, $|y|>R$ such that
$$
\min_{\mathbb{R}^{N-1}} \{ v_2-\varepsilon \varphi_2-v_1\}=v_2(y)-\varepsilon \varphi_2(y)-v_1(y) <0
$$
This implies that $0< \overline{v}(y)<v_1(y)\leq \delta$. Denote $L$ the operator $L=-\Delta +\alpha g(y)$. Since $0\leq L(\overline{v}-v_1)-(f(\overline{v})-f(v_1))$ and $f(\overline{v}(y))-f(v_1(y))>0$, at the point $y$ we get $L(\overline{v}-v_1)(y)> 0$. Therefore, we have reached a contradiction. This shows that $v_1\leq v_2-\varepsilon \varphi_2$. Now we have a super-solution $\overline{v}$ above a sub-solution $v_1$. This implies that there exists a stable solution $v$ such that $v_1 \leq v \leq v_2-\varepsilon\varphi_2 <V$. This however is in contradiction with condition \eqref{uniqsPA}.
\end{proof}
From this property, we derive the following useful consequence.
\begin{proposition}
Let $\alpha \in (0,\alpha_*)$ and let $W$ be the maximal solution of equation \eqref{PAb} with the value $\alpha_*$ of the parameter. Then, any other solution $v$ of \eqref{PAb} with parameter $\alpha$ that is not the maximal solution cannot be above $W$.
\end{proposition}
This immediately follows from the previous proposition as $W$ is a sub-solution of the equation for the value $\alpha<\alpha_*$ and $W<V$.
A consequence of this proposition is
\begin{proposition}
\label{Visole}
For $\alpha \in (0,\alpha_*]$ and under condition \eqref{uniqsPA}, the maximal solution $V$ is isolated in $L^\infty$ topology. Therefore, there exists $\theta_1>\theta$ such that if $v$ is a solution of \eqref{PAb} with $v(0)\geq \theta_1$ then $v\equiv V$.
\end{proposition}
As we have done before, we can prove that if $v$ is a solution such that $v\geq W$ in $\overline{B_R}$, then $v\geq W$ in $\mathbb{R}^{N-1}$ (and $W<V$). Then any solution $v\not \equiv V$ is such that there exists $y \in \overline{B_R}$ such that $v(y)\leq W(y)$ and therefore $\|v-V\|_{L^\infty} \geq \min_{\overline{B_R}}(V-W)=\delta>0$.
Now, if there exists a sequence $v_n$ of solutions of \eqref{PAb} such that $v_n(0)\rightarrow V(0)$ then by elliptic estimates $v_n\rightarrow W$ a positive solution of \eqref{PAb} and $W(0)=V(0)$ so $W\equiv V$ by the maximum principle which contradicts the fact that $V$ is isolated.
\section{Traveling fronts for a bistable non-linearity}
\label{section: TFbist}
In this section we assume that $f$ is of bistable type and satisfies \eqref{fbistable}-\eqref{fcpos}. In addition, we assume that $0<\alpha<\alpha_*$ and that condition \eqref{uniqsPA} is fulfilled. Therefore, there exists a unique non-zero stable solution $V(y)=V_\alpha(y)$ of the profile equation \eqref{PAb}. Therefore $V>0$, $J_\alpha(V)=\min \{ J(w), \; w\in \mathcal{H} \}$, $\lambda_1[V] \geq 0$ and $V$ is isolated in the $L^\infty$ topology. Furthermore $V$ is the maximal solution. Any other non-zero solution $w$ satisfies $0<w<V$ in $\mathbb{R}^{N-1}$ and $\lambda_1[w]<0$ where $\lambda_1[w]$ is the principal eigenvalue of the linearized problem defined in definition \ref{defvp}.
In this section, we prove the existence of a traveling front solution of \eqref{main} representing an invasion of $0$ by the state $V$ at positive speed. Such a solution is given as a pair $(c,u)$ of
\begin{equation}
\label{FPb}
\begin{cases}
-\Delta u -c \partial_1 u+\alpha g(y)u=f(u) \quad \text{in } \mathbb{R}^N \\
u(-\infty,y)=V(y), \quad u(+\infty,y)=0
\end{cases}
\end{equation}
with $c<0$ and $u:\mathbb{R}^N\rightarrow (0,1)$.
We follow the construction of a solution given above. Namely, let $a\geq 1$ and in the slab $\Sigma_a=(-a,a)\times \mathbb{R}^{N-1}$, consider the problem
\begin{equation}
\label{FPab}
\begin{cases}
-\Delta u -c\partial_1 u +\alpha g(y) u=f(u) \quad \text{in } \Sigma_a,\\
u(-a,y)=V(y), \quad u(+a,y)=0.
\end{cases}
\end{equation}
We recall that for any $c\in \mathbb{R}$, for $a$ fixed, there exists a unique solution $u=u^c$ of \eqref{FPab}. Furthermore, $0<u<V$ and $\partial_1 u <0$ in $\Sigma_a$. The mapping $c\mapsto u^c$ is decreasing.
Up to here, the procedure is the same as before. From this point on however, we need to modify the above argument since we used the fact that $f$ was positive.
Our first task is to prove the following
\begin{proposition}
There exists a unique $(c_a,u_a)$ such that $u_a$ is a solution of \eqref{FPab} for speed $c_a$ and $u_a$ satisfies the normalization condition
\begin{equation}
\label{NC}
\max_{y\in \mathbb{R}^{N-1}} u_a(0,y)=\theta.
\end{equation}
Let us first prove the existence of $c_a$. The uniqueness is clear.
The parameter $c_a$ is bounded independently of $a\geq 1$. Moreover,
$$
\liminf_{a\rightarrow +\infty} c_a \geq 0.
$$
\end{proposition}
\begin{proof}
The bound from above is obtained simply by comparison with the one dimensional problem. Indeed, consider the ODE problem for $z=z(x_1)$:
\begin{equation}
\label{TF1D}
\begin{cases}
-z''-\gamma z'=f(z) \quad \text{in } (-a,a) \\
z(-a)=1, \quad z(+a)=0, \quad z(0)=\theta
\end{cases}
\end{equation}
It is known that there exists a unique value $\gamma^a$ for which \eqref{TF1D} has a (unique) solution $z$. Furthermore, $\lim_{a\rightarrow +\infty} \gamma^a=\gamma^*$ where $\gamma^*$ is the unique speed of traveling fronts for the 1D equation
\begin{equation*}
\begin{cases}
-z''-\gamma^* z'=f(z) \quad \text{in } (-a,a) \\
z(-\infty)=1, \quad z(+\infty)=0
\end{cases}
\end{equation*}
Comparing \eqref{TF1D} with \eqref{FPab}, we see that for each $c=\gamma^a$, the solution $z$ of \eqref{TF1D} is a super-solution of \eqref{FPab}, thus $z>u^{\gamma^a}$ and for all $y\in\mathbb{R}^{N-1}$, $u^{\gamma^a}(0,y)<z(0)=\theta$. Since $c\mapsto u^c$ is decreasing, we see that
$$
\max_{y\in \mathbb{R}^{N-1}}u^c(0,y) <\theta \text{ for all }c\geq \gamma^a.
$$
Assume now that $\max_{y\in \mathbb{R}^{N-1}}u^c(0,y) <\theta$ for all $c\in \mathbb{R}$. Passing to the limit for $c\rightarrow -\infty$, $u^c$ converges toward a positive solution $v$ of \eqref{PAb} with $\max v <\theta$. By the maximum principle, it is impossible thus there exists a unique $c_a \in (-\infty,\gamma^a)$ such that \eqref{NC} is fulfilled.
Since $\gamma^a \rightarrow \gamma^*<\infty$ as $a\rightarrow +\infty$ and $a \mapsto \gamma^a$ is a continuous function, this shows that $\displaystyle \sup_{a \geq 1} c_a <\infty$.
Since $a \mapsto c_a$ is continuous, in order to complete the proof of the Proposition, it suffices to show that $\liminf_{a\rightarrow \infty} c_a \geq 0$. For this, we argue by contradiction and assume that for a sequence $a_j\rightarrow +\infty$ there holds $c_{a_j}<0$. For the sake of simplicity, we write $a$ instead of $a_j$. Since $c\mapsto u^c$ is decreasing, from this we infer that along this subsequence, the solution $v=v^a$ of
$$
\begin{cases}
-\Delta v+\alpha g(y) v =f(v) \quad \text{in } \Sigma_a \\
v(-a,y)=V(y), \quad v(+a,y)=0
\end{cases}
$$
satisfies $\max_{y\in \mathbb{R}^{N-1}}v(0,y)\leq \theta$.
Due to Proposition \ref{Visole}, there exists $\theta_1>\theta$ such that if an asymptotic profile $v$ solution of \eqref{PAb} verifies $v(0)\geq \theta_1$ then $v\equiv V$.
There is a point $b=b_j$, $-a<b < 0$ such that $v_a(b,0)=\theta_1$. We now translate the solution to center it on $x_1=b$. That is, we let $\tilde{v}_a(x_1,y)=v_a(x_1+b,y)$ defined for $x_1\in (-a-b,a-b)$ and $y \in \mathbb{R}^{N-1}$. The interval $(-a-b,a-b)$ either converges (along a subsequence) to $(-\infty,+\infty)$ or to some $(-d,+\infty)$ with $0\leq d< \infty$. In both cases, by standard elliptic estimates, one can strike out a subsequence of $\tilde{v}_a$, denoted again $\tilde{v}_a$, such that $\tilde{v}_a$ converges locally to some function $w$ where $w$ satisfies:
\begin{equation}
\label{Eqw}
\begin{cases}
-\Delta w + \alpha g(y) w =f(w) \quad \text{in } (-d,+\infty)\times \mathbb{R}^{N-1} \\
\partial_1 w \leq 0, \quad w(0,0)=\theta_1.
\end{cases}
\end{equation}
In case the interval is converging to $(-d,+\infty)$, in addition we know that $w(-d,y)=V(y)$.
If the interval converges to $\mathbb{R}$, then $\lim_{x_1\rightarrow -\infty} w(x_1,y)$ exists and is some function $W(y)$ which is then a solution of the profile equation \eqref{PAb}. But since $w(0,0)=\theta_1$, we know that $W(0)\geq \theta_1$. By the definition of $\theta_1$, this implies that $W\equiv V$. Therefore, denoting $d=\infty$ in case $(-a-b,a-b) \rightarrow \mathbb{R}$, in both cases, we get
$$
\forall y\in \mathbb{R}^{N-1} \quad w(-d,y)=V(y)
$$
where now $0 \leq d\leq +\infty$. We also know that $w(+\infty,y)=\psi(y)$ exists with $0\leq \psi<V$.
Multiply \eqref{Eqw} by $\partial_1 w$ and integrate over $(-d,+\infty) \times \mathbb{R}^{N-1}$ to get
$$
\int_{\{x_1=-d\}} \frac{1}{2}(\partial_1 w)^2+J(\psi)-J(V)=0
$$
where $\partial_1 w=0$ if $d=\infty$. In all cases, we get
$$
J(V)\geq J(\psi)
$$
Since $V$ minimizes $J_\alpha$, we obtain $V\equiv \psi$ and $w(x_1,y)=V(y)$ for all $x_1\in (-d,+\infty)$ but this contradicts the normalization $w(0,0)=\theta_1$.
We have thus reached a contradiction. This shows that for large $a$, $c_a\geq 0$, which completes the proof of the Proposition.
\end{proof}
Let us now turn to the proof of the existence of traveling front solutions of \eqref{FPb}. Since $c_a$ and $u_a$ are bounded, by standard elliptic estimates, we can strike out a sequence $a=a_j\rightarrow \infty$ (we continue to denote subsequences by $a$) such that $c_a\rightarrow c\geq 0$ and $u_a\rightarrow u$. We know that $(c,u)$ satisfies the equation
$$
- \Delta u -c \partial_1 u +\alpha g(y) u =f(u) \quad \text{ in } \mathbb{R}^N
$$
with $\partial_1 u \leq 0$ and $\max_{\mathbb{R}^{N-1}}u(0,\cdot)=\theta$. It remains to identify the limits as $x_1\rightarrow \pm \infty$. These $\lim_{x_1\rightarrow \pm \infty} u(x_1,y)=u_{\pm}(y)$ exist and are solutions of the asymptotic profile equation \eqref{PAb}. Now since $0\leq u_+(y)=\lim_{x_1\rightarrow +\infty} u(x_1,y) \leq \theta$ and all positive solutions $w$ of \eqref{PAb} satisfy $\max w>\theta$, we have $u_+\equiv 0$.
We claim that $u_- (y)=\lim_{x_1\rightarrow -\infty}u(x_1,y)$ coincides with $V(y)$. Clearly, $0<u_-\leq V$. Argue by contradiction that $u_- \not\equiv V$, implying $u_-<V$. By assumption, $u_-$ is an unstable solution of \eqref{PAb} in the sense that $\lambda_1[u_-]<0$. Let us construct a super-solution of the stationary equation, that is a $w$ with
$$
-\Delta w+\alpha g(y) w \geq f(w)
$$
such that $w$ is a compact perturbation of $u_-$ and as close as we wish to $u_-$.
Consider the linearized equation about $u_-$:
$$
-\Delta \psi -f'(u_-(y))\psi +\alpha g(y) \psi= \lambda_1[u_-] \psi
$$
with $\lambda_1[u_-]<0$. We know that $\lambda_1[u_-]$ is the limit of the Dirichlet principal eigenvalue in a ball when the radius goes to infinity (This follows from the Rayleigh quotient minimization). Therefore, $R>0$ can be chosen sufficiently large so that the principal eigenvalue $\mu$ and associated eigenfunction $\psi$ of
$$
\begin{cases}
-\Delta \psi +\alpha g(y)\psi-f'(u_-)\psi=\mu \psi \quad \text{ in } B_R \\
\psi=0 \quad \text{on } \partial B_R, \qquad \psi >0 \quad \text{in } B_R
\end{cases}
$$
satisfy $\mu<0$.
Consider the function $\zeta (x_1,y)=\cos (\omega x_1) \psi(y)$ defined for $x_1\in (-L,L)$ with $L=\frac{\pi}{2 \omega}$ and $|y|<R$. We note $D=(-L,L)\times B_R$. This function is positive and satisfies:
$$
\begin{cases}
-\Delta \zeta +\alpha g(y)\zeta - f'(u_-)\zeta=(\mu+\omega^2)\zeta \quad \text{ in } D \\
\zeta = 0 \quad \text{on } \partial D
\end{cases}
$$
Choose $L$ large enough so that $\mu+\omega^2<0$. Then let $w(x_1,y)=u_-(y)-\varepsilon \zeta(x_1,y)$ with $\varepsilon>0$ and $(x_1,y)\in D$. This function satisfies
$$
-\Delta w+\alpha g(y) w - f(w)= \left( -(\mu+\omega^2)+ \frac{f(u_-) - f(u_--\varepsilon \zeta)}{\varepsilon \zeta} -f'(u_-)\right) \varepsilon \zeta.
$$
Since $\mu+\omega^2<0$, we can choose $\varepsilon$ sufficiently small so that
$$
-\Delta w+\alpha g(y) w - f(w)\geq 0 \quad \text{ in } D \text{ and } w>0.
$$
Furthermore, because $\varepsilon \zeta =0$ on $\partial D$ and $\varepsilon \zeta >0$ in $D$, that is $w<u_-$ in $D$, if we extend $w$ by choosing $w(x_1,y)=u_-(y)$ for all $(x_1,y) \not \in D$, we have constructed a (generalized) super-solution of the problem (see e.g. \cite{BL80}).
Let us now derive a contradiction. We consider two cases.
$\bullet$ Case (i): Suppose $c>0$. Then $U(t,x_1,y)=u(x_1-ct,y)$ is a solution of the evolution equation
$$
\partial_t U - \Delta U +\alpha g(y) U=f(U) \qquad t\in \mathbb{R}, \;(x_1,y)\in \mathbb{R}^N.
$$
Now $U \xrightarrow{t\rightarrow -\infty}0$ locally uniformly in $(x_1,y)$. Furthermore, for all times $U(t,x_1,y)\leq u_-(y)$. Since $w$ is a compact perturbation of $u_-$ for a time $t_0$ sufficiently negative, we get
$$
\forall (x_1,y)\in \mathbb{R}^N \quad U(t_0,x_1,y)\leq w(x_1,y).
$$
Now $U(t,x_1,y) \xrightarrow{t\rightarrow +\infty} u_-(y)$ locally uniformly, and we get a contradiction since $U(t,x_1,y)\leq w(x_1,y) <u_-(y)$ for all $(x_1,y)\in D$.
$\bullet$ Case (ii): The case that remains to be studied is $c=0$ (since we already have $c\geq 0$). Then $u(x_1,y)$ is a stationary solution of the same equation that $w$ is a super-solution of. Since $u(-\infty,y)=u_-(y)$ and $u(+\infty,y)=0$, and since $w=u_-$ outside a compact set, after a translation, we can assume that $u_h=u(x_1+h,y)\leq w(x_1,y)$ (for large enough $h$). Define
$$
h^*=\inf \{h\in \mathbb{R}, u(x_1+h,y) \leq w(x_1,y) \text{ in } \mathbb{R}^N \}.
$$
Clearly, $h^*>-\infty$ (since $w<u_-$ at some points). Then $w(x_1,y) \geq u(x_1+h^*,y)=u_{h^*}$ and $\min (w - u_{h^*})=0$ is necessarily achieved at a point of $\overline{D}$. Since $w(x_1,y)=u_-(y)>u(x_1+h,y)$ for all $h$ if $(x_1,y)\not \in D$, we see that the minimum is achieved at an interior point of $D$. Writing $ w-u_{h^*}\geq 0$ as a super-solution of a linear elliptic equation in $D$, we derive a contradiction with the strong maximum principle.
Therefore in all cases, the solution $u$ satisfies the limiting condition:
$$
u(-\infty,y)=V(y), \qquad u(+\infty,y)=0.
$$
Therefore $(c,u)$ is a solution of the traveling front equation \eqref{FPb}.
\section{The model of cortical spreading depression}
\label{section: CSD}
We consider here more general versions of the model \eqref{SD} described in the Introduction. The problems studied in this paper have the following general form
\begin{equation}
\label{Egen}
\partial_t u -\Delta u=h(y,u) \quad x=(x_1,y)\in \mathbb{R}^N.
\end{equation}
In the modeling context $N=2$ and $3$ are the cases of interest. As indicated in the Introduction, this equation also describes cortical spreading depressions (CSD). There the wave propagates in a medium composed of two different components, the gray and white matters of the brain, with a narrow transition area separating them.
Thus we consider in this section functions $h(y,u)$ of the following type:
\begin{equation}
\label{f1}
h(y,u)=f(u) \text{ for } |y|\leq L_1
\end{equation}
\begin{equation}
\label{f2}
h(y,u)\leq -mu \text{ for } |y|\geq L_2
\end{equation}
\begin{equation}
\label{f3}
h(y,u)+mu\xrightarrow{|y|\rightarrow +\infty} 0 \quad \text{uniformly for } u\in \mathbb{R}^+
\end{equation}
where $0<L_1\leq L_2 <\infty$ and $K\geq m>0$ are given parameters and $f$ is of bistable form. That is we assume that $f$ verifies conditions \eqref{fbistable}-\eqref{fcpos} of section \ref{section: APbist}. Note that in particular, we assume
$$
\int_0^1 f(s)ds >0.
$$
We also assume that $y\mapsto h(y,s)$ is continuous and that $s\mapsto h(y,s)$ is Lipschitz continuous for all $s\in [0,1]$ (and $|y| \not =L_1$ in case $L_1=L_2$). Lastly we assume that
$$
\forall s\in [0,1] \; \forall y\in \mathbb{R}^{N-1} \quad h(y,s)\leq \max \{ f(s), -ms\}.
$$
\subsection{The asymptotic profile equation}
We start as usual with the profile equation
\begin{equation}
\label{APSD}
\begin{cases}
-\Delta V=h(y,V) \quad y\in \mathbb{R}^{N-1}, \\
V\geq 0, \quad V \text{ bounded.}
\end{cases}
\end{equation}
We recall that $\lambda_1[V]$ is the principal eigenvalue of the linearized equation about $V$. This eigenvalue can be defined as
$$
\lambda_1[V]=\inf_{\varphi\in H^1(\mathbb{R}^{N-1})} \frac{\int \big( |\nabla \varphi |^2 -\partial_2 h (y,V) \varphi^2 \big)
}{\int \varphi^2}.
$$
Associated with \eqref{APSD} is the energy functional:
$$
J(w)=\int_{\mathbb{R}^{N-1}} \left( \frac{1}{2} |\nabla w|^2 -H(y,w)\right) dy
$$
where $H(y,z)=\int_0^z h(y,s)ds$. Note that owing to condition \eqref{f2}, $J(w)$ is well defined for all $w\in H^1(\mathbb{R}^{N-1})$.
\begin{theorem}
\label{thmSD}
There exist critical radii $0<L_*\leq L^*<\infty$ with the following properties:
\begin{description}
\item[i)] For $L_2<L_*$, there is no solution other than $0$ to the asymptotic profile equation \eqref{APSD}.
\item[ii)] For $L_1>L^*$ (independently of $L_2$), there exists a maximum solution $V$ of \eqref{APSD} and this solution is stable in the sense that $\lambda_1[V]\geq 0$.
\item[iii)] For all $L_1>L^*$, the minimum of the energy functional is achieved at some non-zero function $V_J(y)$, i.e.
$$
J(V_J)=\min_{w\in H^1(\mathbb{R}^{N-1})} J(w) <0.
$$
\end{description}
\end{theorem}
\begin{proof}
i) Since the equation implies that $-\Delta u +mu\leq 0$ for all $|y|\geq L_2$ in $\mathbb{R}^{N-1}$, and $u>0$ is bounded, by Theorem \ref{BR} we know that $u$ and $|\nabla u|$ have exponential decay as $|y|\rightarrow +\infty$. Then we get
$$
\min(1,m) \|u\|^2_{H^1(\mathbb{R}^{N-1})} \leq \int_{\mathbb{R}^{N-1}} |\nabla u| ^2 +m u^2 \leq \int_{B_{L_2}} \big( f(u) +m u \big)u \leq K \int_{B_{L_2}} u^2.
$$
We know, by Sobolev embedding and H\"{o}lder inequality, that
$$
\int_{B_{L_2}} u^2 \leq \eta(L_2) \|u\|^2_{H^1(\mathbb{R}^{N-1})}
$$
where $\eta(L_2)\rightarrow 0$ as $L_2\rightarrow 0$. Therefore for $L_2$ small enough, these inequalities yield $u\equiv 0$.
ii) Next, since $1$ is a super-solution of the equation in $\mathbb{R}^{N-1}$, there exists a maximum solution of equation \eqref{APSD} that we denote $V$. By what we have just seen, $V\equiv 0$ for $L_2$ sufficiently small. Let us now show that $V>0$ for $L_1$ sufficiently large.
Let us consider the energy restricted to the ball of radius $R\leq L_1$
$$
J_R(w)=\int_{B_R} \left(\frac{1}{2} |\nabla w|^2 -F(w) \right) dy
$$
where $F(z)=\int_0^z f(s)ds$. We know (see the proof of Theorem \ref{thmPAb}) that for $R$ sufficiently large there exists a minimum $w_R$ of
$$
J_R(w_R)=\inf_{w\in H^1_0(B_R)} J_R(w) <0.
$$
Then $w_R>0$ in $B_R$ and $w_R$ is solution of $-\Delta w_R=f(w_R)$ in $B_R$ and $w_R=0$ on $\partial B_R$. Extending $w_R$ by $0$ outside the ball $B_R$, we get a global (generalized) sub-solution. The solution $V$ is such that $V\geq w_R$ (since $V$ is the maximum solution). This implies that $V\not\equiv 0$ and therefore $V>0$ in $\mathbb{R}^{N-1}$ for $L_1 \geq R$.
iii) Now for $L_1\geq R$, clearly
$$
\inf_{w\in H^1(\mathbb{R}^{N-1})} J(w)\leq J_R(w_R)<0.
$$
Let us now show that the infimum is achieved.
Let $(w_n)$ be a minimizing sequence: $J(w_n)\rightarrow \inf J <0$ for $n\rightarrow +\infty$. Note that $J$ is bounded from below.
Writing
\begin{eqnarray*}
J(w_n)& \geq& \frac{1}{2} \int_{ \mathbb{R}^{N-1}\setminus B_{L_2}}|\nabla w_n|^2+m {w_n}^2 + \int_{B_{L_2}}\frac{1}{2}|\nabla w_n|^2 -H(y,w_n) \; \\
& \geq & \frac{1}{
2}\int_{ \mathbb{R}^{N-1}\setminus B_{L_2}}|\nabla w_n|^2+m {w_n}^2 +\int_{B_{L_2}}\frac{1}{2}|\nabla w_n|^2 -C+\varepsilon {w_n}^2 \\
& \geq & -C|B_{L_2}|+\tilde{\varepsilon}\|w_n\|^2_{H^1(\mathbb{R}^{N-1})}
\end{eqnarray*}
we can strike out a subsequence still denoted $(w_n)$ such that $w_n \rightarrow w$ weakly in $H^1(\mathbb{R}^{N-1})$. Now using \eqref{f3}, for every $\varepsilon>0$, there exists $R=R(\varepsilon)>0$ such that $\left| H(y,s)+\frac{m}{2}s^2 \right|<\varepsilon s^2$ for all $|y|\geq R$ and all $s\in \mathbb{R}^+$ (there is no loss in generality in assuming $w_n\geq 0$ as ${w_n}^+$ is also a minimizing sequence). Therefore
$$
J(w_n)=\frac{1}{2}\int_{ \mathbb{R}^{N-1}}|\nabla w_n|^2+\frac{m}{2} \int_{|y|\geq R} {w_n}^2 +r(\varepsilon) - \int_{|y|\leq R} H(y,w_n)
$$
where $|r(\varepsilon)|\leq C\varepsilon$ for some constant $C>0$.
By compact injection of $H^1(\mathbb{R}^{N-1})\hookrightarrow L^2(B_R)$, we can assume that $w_n \rightarrow w$ strongly in $L^2(B_R)$. Then by standard arguments relying on Lebesgue's dominated convergence Theorem, we see that
$$
\int_{B_R}H(y,w_n)\rightarrow \int_{B_R} H(y,w).
$$
Owing to Sobolev embedding and H\"{o}lder inequality, it is straightforward to check that the quantity
$$
\frac{1}{2}\int_{ \mathbb{R}^{N-1}}|\nabla w_n|^2+\frac{m}{2} \int_{|y|\geq R} {w_n}^2
$$
defines the square of a norm equivalent to the usual $H^1(\mathbb{R}^{N-1})$ norm. Hence using the lower semi-continuity of the norm, we get
$$
\lim_{n\rightarrow \infty} J(w_n)=\inf J\geq \frac{1}{2}\int_{ \mathbb{R}^{N-1}}|\nabla w|^2+\frac{m}{2} \int_{|y|\geq R} {w}^2+r(\varepsilon)-\int_{B_R} H(y,w).
$$
Now using again \eqref{f3}, we get:
$$
J(w)=\int_{ \mathbb{R}^{N-1}}\frac{1}{2}|\nabla w|^2-\int_{\mathbb{R}^{N-1}} H(y,w) \leq \inf_{H^1(\mathbb{R}^{N-1})} J+2 |r(\varepsilon)|.
$$
Since $\varepsilon>0$ is arbitrarily small we get
$\displaystyle
J(w)=\inf_{ H^1 (\mathbb{R}^{N-1})} J.
$
\end{proof}
\begin{remark}
By using the method of \cite{BL80bis}, one can show that for $L_1$ large enough there exists a second solution in $\mathbb{R}^{N-1}$.
\end{remark}
We will now make use of the condition that the stable solution of \eqref{APSD} is unique. In the paper of Chapuisat and Joly \cite{CJ11}, it is argued by phase plane method, that for the case $N-1=2$, $L_1=L_2$ and $h(y,s)=-ms$ for $|y|\geq L_2$ that indeed this is the case. We note that it is an interesting open problem to derive such uniqueness results in more general situations or to complete the heuristic part of the argument of \cite{CJ11}.
\subsection{Traveling fronts for the CSD model}
In this section, we prove Theorem \ref{thmSDTF}. The proof is similar to that of Section \ref{section: TFbist}. There we used that $h(y,u)= f(u)-\alpha g(y) u$ with $g\rightarrow +\infty$. But actually, the same properties can be derived from the condition $h(y,u)\leq -mu$ for large $|y|$. We start by constructing a solution of
\begin{equation}
\label{TFSDa}
\begin{cases}
\Delta u_a-c_a \partial_1 u_a =h(y,u_a) \quad \text{ in } (-a,a)\times \mathbb{R}^{N-1} \\
u_a(-a,y)=V(y), \quad u_a(+a,y)=0,
\end{cases}
\end{equation}
that verifies
\begin{equation}
\label{normSD}
\sup_{y\in \mathbb{R}^{N-1}} u_a(0,y)=\theta
\end{equation}
for $a\geq 1$ and where $\theta$ is the unstable zero of $f$, that is $f(\theta)=0$ and $0<\theta<1$. We recall that $c_a$ is uniquely determined by the normalization condition \eqref{normSD}.
\hspace{-7mm}
\begin{minipage}{0.7\textwidth}
Let $\tilde{f}(u)=\max \{f(u), -mu\}$. Note that $\tilde{f}$ itself is bistable:
$$
\tilde{f}(0)=\tilde{f}(\theta)=\tilde{f}(1)=0, \quad \tilde{f}(s)<0 \text{ in }(0,\theta), \quad \tilde{f}(s)>0 \text{ in }(\theta, 1).
$$
\end{minipage} \hfill
\begin{minipage}{0.28\textwidth}
\resizebox{\textwidth}{!}{\input{ftilde.pspdftex}}
\end{minipage}
We denote by $z_a^c$ the solution of
\begin{equation}
\label{1DSD}
\begin{cases}
-z''-cz'=\tilde{f}(z) \\
z(-a)=1, \quad z(+a)=0.
\end{cases}
\end{equation}
The function $z_a^{c_a}$ is a supersolution of \eqref{TFSDa} thus $u_a \leq z_a^{c_a}$. In view of \eqref{normSD} this implies that $z_a^{c_a}(0)\geq \theta$ and this implies that $c_a\leq \gamma_a$ where $\gamma_a$ is the unique value of $c$ such that the solution of \eqref{1DSD} verifies $z_a^{\gamma_a}(0)=\theta$. This as before yields the upper bound for $c_a$.
The lower bound is obtained in the same manner as in Section \ref{section: TFbist}, and so is the convergence as $a\rightarrow +\infty$.
\section*{Acknowledgments}
This study was supported by the French ``Agence Nationale de la Recherche'' through the project PREFERED (ANR 08-BLAN-0313). Henri Berestycki was partially supported by an NSF FRG grant DMS-1065979. Part of this work was carried out while he was visiting the Department of Mathematics at the University of Chicago.
We wish to thank Laurent Desvillettes for bringing to our attention the population genetics model described in equations \eqref{DP1}-\eqref{DP2}.
\bibliographystyle{plain}
|
{
"timestamp": "2012-06-29T02:01:47",
"yymm": "1206",
"arxiv_id": "1206.6575",
"language": "en",
"url": "https://arxiv.org/abs/1206.6575"
}
|
\section{Introduction}
Let $V$ be a left vector space over a division ring $R$.
We suppose that $\dim V=n$ is finite and not less than $2$.
Denote by ${\mathcal P}(V)$ the associated projective space formed by $1$-dimensional subspaces of $V$.
Our first result (Theorem 1) is related to extendability of permutations on finite subsets of $V$ to
linear automorphisms of $V$:
if every permutation on a finite subset of $V$ can be extended to a linear automorphism of $V$ then
this subset is formed by linearly independent vectors or it consists of
$$x_{1},\dots,x_{m},-(x_{1}+\dots+x_{m}),$$
where $x_{1},\dots,x_{m}$ are linearly independent vectors.
Under the assumption that $R$ is a field, we determine all finite subsets ${\mathcal X}\subset {\mathcal P}(V)$
such that every permutation on ${\mathcal X}$ can be extended to an element of ${\rm PGL}(V)$.
Our second result (Theorem 2) states that there are precisely three distinct types of such subsets.
The main results (Theorems 1 and 2) will be reformulated in terms of linear and projective representations of symmetric groups
(Corollaries 1 and 2).
\section{Permutations on finite subsets of vector spaces}
Let $X$ be a finite subset of $V$ containing more than one vector.
Denote by $S(X)$ the group of all permutations on $X$.
We want to determine all cases when every element of ${\rm S}(X)$ can be extended to a linear automorphism of $V$.
This is possible, for example, if $X$ is formed by linearly independent vectors.
\begin{exmp}\label{exmp1}{\rm
Suppose that $X$ consists of linearly independent vectors $x_{1},\dots,x_{m}$ and
the vector
$$x_{m+1}=-(x_{1}+\dots+x_{m}).$$
For every $i\in \{1,\dots,m-1\}$ we take any linear automorphism $u_{i}\in {\rm GL}(V)$ such that
$$u_{i}(x_{i})=x_{i+1},\;u_{i}(x_{i+1})=x_{i}\;\mbox{ and }\;u_{i}(x_{j})=x_{j}\;\mbox{ if }\;j\ne i,i+1,m+1.$$
Every $u_i$ sends $x_{m+1}$ to itself.
Consider a linear automorphism $v\in {\rm GL}(V)$ leaving fixed every $x_{i}$ for $i\le m-1$ and
transferring $x_{m}$ to $x_{m+1}$.
Then
$$v(x_{m+1})=-(v(x_{1})+\dots+v(x_{m}))=-(x_{1}+\dots+x_{m-1}-x_{1}-\dots-x_{m-1}-x_{m})=x_{m}.$$
So, all transpositions of type $(x_{i},x_{i+1})$ can be extended to linear automorphisms of $V$.
Since $S(X)$ is generated by these transpositions,
every permutation on $X$ is extendable to a linear automorphism of $V$.
}\end{exmp}
\begin{theorem}\label{theorem1}
If every permutation on $X$ can be extended to a linear automorphism of $V$
then $X$ is formed by linearly independent vectors or it consists of
$$x_{1},\dots,x_{m},-(x_{1}+\dots+x_{m}),$$
where $x_{1},\dots,x_{m}$ are linearly independent.
\end{theorem}
\begin{proof}
Let $x_{1},\dots,x_{k}$ be the elements of $X$.
Suppose that these vectors are not linearly independent
and consider any maximal collection of linearly independent vectors from $X$.
We can assume that this collection is formed by $x_{1},\dots,x_{m}$, $m<k$.
Then every $x_{p}$ with $p>m$ is a linear combination of $x_{1},\dots,x_{m}$, i.e.
$x_{p}=\sum^{m}_{l=1}a_{l}x_{l}$.
Let $u\in {\rm GL}(V)$ be an extension of the transposition $(x_{i},x_{j})$, $i,j\le m$.
Then
$$u(x_{i})=x_{j},\;u(x_{j})=x_{i}\;\mbox{ and }\;u(x_{l})=x_{l}\;\mbox{ if }\;l\ne i,j.$$
We have
$$\sum^{m}_{l=1}a_{l}x_{l}=x_{p}=u(x_{p})=\sum^{m}_{l=1}b_{l}x_{l},\;\mbox{ where }\;
b_{i}=a_{j},\;b_{j}=a_{i}\;\mbox{ and }\;b_{l}=a_{l}\;\mbox{ if }\;l\ne i,j.$$
Since $x_{1},\dots,x_{m}$ are linearly independent, the latter means that $a_{i}=a_{j}$.
This equality holds for any $i,j\le m$ and we have
$$x_{p}=a(x_{1}+\dots+x_{m})$$
for some non-zero scalar $a\in R$.
Let $v\in {\rm GL}(V)$ be an extension of the transposition $(x_{1},x_{p})$.
Then
$$v(x_{1})=x_{p},\;v(x_{p})=x_{1}\;\mbox{ and }\;v(x_{i})=x_{i}\;\mbox{ if }\;i\ne 1,p$$
We have
$$x_{1}=v(x_{p})=a(v(x_{1})+\dots+v(x_{m}))=a(x_{p}+x_{2}+\dots+x_{m})=$$
$$=a^{2}(x_{1}+\dots+ x_{m})+a(x_{2}+\dots+x_{m})=a^{2}x_{1}+(a^{2}+a)(x_{2}+\dots+x_{m}).$$
Hence $a^{2}=1$ and $a^{2}+a=0$ which implies that $a=-1$ and
$$x_{p}=-(x_{1}+\dots+x_{m}).$$
This equality holds for every $p>m$.
Therefore, $k=m+1$ and the second possibility is realized.
\end{proof}
Let $X$ be a finite subset of $V$ such that every permutation on $X$ can be extended to a linear automorphism of $V$.
Suppose that $|X|\ge 2$ and $\langle X\rangle=V$.
Then for every $s\in S(X)$ there is a unique extension $\alpha_{X}(s)\in {\rm GL}(V)$.
If $s,t\in S(X)$ then $\alpha_{X}(st)$ and $\alpha_{X}(s)\alpha_{X}(t)$ both are extensions of $st$
which guarantees that
$$\alpha_{X}(st)=\alpha_{X}(s)\alpha_{X}(t).$$
Thus $\alpha_{X}$ is a monomorphism of $S(X)$ to ${\rm GL}(V)$
(it is clear that the kernel of $\alpha_{X}$ is trivial).
The image of $\alpha_{X}$ will be denoted $G(X)$.
\begin{cor}\label{cor1}
Let $G$ be a subgroup of ${\rm GL}(V)$ isomorphic to ${\rm S}_{m}$.
Let also $X$ be an orbit of $G$ such that $G$ acts faithfully on $X$ and $|X|=m$
\footnote{If $X$ is an orbit of $G$ and $G$ acts faithfully on $X$ then $|X|\ge m$.}.
Suppose that there are no non-zero proper $G$-invariant subspaces of $V$.
Then the following assertions are fulfilled:
\begin{enumerate}
\item[$\bullet$]
$X$ is a base of $V$ or $X=\{x_{1},\dots,x_{n},-(x_{1}+\dots+x_{n})\}$,
where $x_{1},\dots,x_{n}$ form a base of $V$;
\item[$\bullet$]
$G=G(X)$.
\end{enumerate}
\end{cor}
\begin{proof}
Let $r$ be the homomorphism of $G$ to $S(X)$ transferring every $g\in G$ to $g|_{X}$.
Since the action of $G$ on $X$ is faithful, $r$ is a monomorphism.
It follows from our assumptions that
$G$ and $S(X)$ have the same number of elements.
Thus $r$ is an isomorphism which means that every permutation on $X$ can be extended to a linear automorphism of $V$.
Since $\langle X\rangle$ is $G$-invariant, we have $\langle X\rangle=V$.
Then $r^{-1}=\alpha_{X}$ and $G=G(X)$.
\end{proof}
\section{Permutations on finite subsets of projective spaces}
Let ${\mathcal X}$ be a finite subset of ${\mathcal P}(V)$ containing more than one element.
Denote by $S({\mathcal X})$ the group of all permutations on ${\mathcal X}$.
In this section we determine all cases when every element of $S({\mathcal X})$ can be extended to an element of ${\rm PGL}(V)$
(if $R$ is a field).
Recall that the group ${\rm PGL}(V)$ is formed by the transformations of ${\mathcal P}(V)$ induced by linear automorphisms of $V$.
Let $\pi$ be the natural homomorphism of ${\rm GL}(V)$ to ${\rm PGL}(V)$.
The kernel of $\pi$ consists of all homotheties $x\to ax$, where $a$ belongs to the center of $R$, i.e.
two linear automorphisms of $V$ induce the same element of ${\rm PGL}(V)$ if and only if one of them is a scalar multiple of the other.
We say that $P_{1},\dots,P_{m}\in {\mathcal P}(V)$ form an {\it independent} subset if
non-zero vectors
$x_{1}\in P_{1},\dots,x_{m}\in P_{m}$
are linearly independent.
Every permutation on an independent subset can be extended to an element of ${\rm PGL}(V)$.
Let $m\in \{2,\dots,n\}$.
An $(m+1)$-element subset ${\mathcal X}\subset {\mathcal P}(V)$ is called an $m$-{\it simplex}
if it is not independent and every $m$-element subset of ${\mathcal X}$ is independent.
For example, if $x_{1},\dots,x_{m}\in V$ are linearly independent and
$a_{1},\dots,a_{m}\in R$ are non-zero then
$$\langle x_{1}\rangle,\dots, \langle x_{m}\rangle\;\mbox{ and }\;\langle a_{1}x_{1}+\dots+a_{m}x_{m}\rangle$$
form an $m$-simplex.
Conversely, if $\{P_{1},\dots,P_{m+1}\}$ is an $m$-simplex then
there exist linearly independent vectors
$$x_{1}\in P_{1}\setminus\{0\},\dots,x_{m}\in P_{m}\setminus\{0\}\;
\mbox{ such that }\;
P_{m+1}=\langle x_{1}+\dots+x_{m}\rangle.$$
Every permutation on an $m$-simplex can be extended to an element of ${\rm PGL}(V)$
\cite[Section III.3, Proposition 1]{Baer}.
Following \cite[Section III.4, Remark 5]{Baer} we say that a subset ${\mathcal X}\subset {\mathcal P}(V)$ is {\it harmonic}
if there are linearly independent vectors $x,y\in V$ such that
$${\mathcal X}=\{\;\langle x \rangle,\langle y \rangle,\langle x+y\rangle,\langle x-y\rangle\;\}.$$
\begin{exmp}\label{exmp2}{\rm
Suppose that the characteristic of $R$ is equal to $3$ and ${\mathcal X}$ is the har\-mo\-nic subset
consisting of
$$P_{1}=\langle x \rangle,\;P_{2}=\langle y \rangle,\;P_{3}=\langle x+y\rangle,\;P_{4}=\langle x-y\rangle.$$
Consider $u_{1},u_{2},u_{3}\in {\rm GL}(V)$ satisfying the following conditions
$$\begin{array}{ll}
u_{1}(x)=y&u_{1}(y)=x,\\
u_{2}(x)=-x&u_{2}(y)=x+y,\\
u_{3}(x)=x&u_{3}(y)=-y.
\end{array}$$
Since the characteristic of $R$ is equal to $3$, we have
$$u_{2}(x-y)=-x-(x+y)=-2x-y=x-y.$$
A direct verification shows that every $\pi(u_{i})$
is an extension of the transposition $(P_{i},P_{i+1})$.
Since the group $S({\mathcal X})$ is generated by all transpositions of type $(P_{i},P_{i+1})$,
every permutation on ${\mathcal X}$ can be extended to an element of ${\rm PGL}(V)$.
}\end{exmp}
\begin{theorem}\label{theorem2}
Suppose that $R$ is a field.
If every permutation on ${\mathcal X}$ can be extended to an element of ${\rm PGL}(V)$ then one of
the following possibilities is realized:
\begin{enumerate}
\item[$\bullet$] ${\mathcal X}$ is an independent subset;
\item[$\bullet$] ${\mathcal X}$ is an $m$-simplex, $m\in \{2,\dots,n\}$;
\item[$\bullet$] the characteristic of $R$ is equal to $3$ and ${\mathcal X}$ is a harmonic subset.
\end{enumerate}
\end{theorem}
\begin{lemma}\label{lemma1}
Suppose that $R$ is a field.
Let $f$ be an element of ${\rm PGL}(V)$ transferring $P\in {\mathcal P}(V)$ to $Q\in {\mathcal P}(V)$.
For any non-zero vectors $x\in P$ and $y\in Q$ there exists $u\in {\rm GL}(V)$
such that $\pi(u)=f$ and $u(x)=y$.
\end{lemma}
\begin{proof}
We take any $v\in {\rm GL}(V)$ such that $\pi(v)=f$. Then $v(x)=ay$ for some non-zero scalar $a\in R$, and the linear automorphism
$u:=a^{-1}v$ is as required.
\end{proof}
\begin{rem}{\rm
If $R$ is non-commutative then
a scalar multiple of a linear mapping is linear only in the case when the scalar belongs to the center of $R$.
}\end{rem}
\iffalse
\begin{rem}{\rm
In the non-commutative case, a scalar multiple of a linear mapping is linear if and only if the corresponding scalar
belongs to the center.
}\end{rem}
\fi
\begin{proof}[Proof of Theorem \ref{theorem2}]
Let $P_{1},\dots,P_{k}$ be the elements of ${\mathcal X}$.
If ${\mathcal X}$ is not independent then we take any maximal independent subset in ${\mathcal X}$.
Suppose that it is formed by $P_{1},\dots,P_{m}$, $m<k$ and
consider $P_{p}$ with $p>m$. Every non-zero vector $y\in P_{p}$ is a linear combination of
non-zero vectors $y_{1}\in P_{1},\dots,y_{m}\in P_{m}$.
If this linear combination contains $y_{i}$ and does not contain $y_{j}$ for some $i,j\le m$ then
an element of ${\rm PGL}(V)$ extending the transposition $(P_{i},P_{j})$
does not leave fixed $P_{p}$ which is impossible.
This means that
$$y=a_{1}y_{1}+\dots+a_{m}y_{m},$$
where all $a_{1},\dots,a_{m}\in R$ are non-zero.
Thus $P_{1},\dots,P_{m}$ and $P_{p}$ form an $m$-simplex for every $p>m$.
If ${\mathcal X}$ consists of $m+1$ elements, i.e. $k=m+1$, then ${\mathcal X}$ is an $m$-simplex.
Consider the case when $k\ge m+2$.
We choose non-zero vectors $x_{1}\in P_{1},\dots,x_{m}\in P_{m}$ such that
$$x_{m+1}:=x_{1}+\dots+x_{m}\in P_{m+1}.$$
If $p\ge m+2$ then
$$P_{p}=\langle x_{p}\rangle,\;\mbox{ where }\; x_{p}= b_{1}x_{1}+\dots+b_{m}x_{m}$$
and all $b_{1},\dots,b_{m}\in R$ are non-zero.
Let $v$ be a linear automorphism of $V$ such that $\pi(v)$ is an extension of the transposition $(P_{m+1},P_{p})$.
By Lemma \ref{lemma1}, we can suppose that $v$ sends $x_{m+1}$ to $x_{p}$.
Since $x_{1},\dots,x_{m}$ are linearly independent and $v(P_{i})=P_{i}$ for every $i\le m$,
the equality
$$v(x_{1})+\dots+ v(x_{m})=v(x_{m+1})=x_{p}=b_{1}x_{1}+\dots+b_{m}x_{m}$$
shows that $v(x_{i})=b_{i}x_{i}$ for every $i\le m$.
Then
$$v(x_{p})=b_{1}v(x_{1})+\dots+b_{m}v(x_{m})=b^{2}_{1}x_{1}+\dots+b^{2}_{m}x_{m}\in P_{m+1}$$
which means that $b^{2}_{1}=b^{2}_{2}=\dots=b^{2}_{m}$
and $b_{i}=\pm b_{j}$ for any $i,j\le m$. In other words,
$$x_{p}=b(\varepsilon_{1}x_{1}+\dots+\varepsilon_{m}x_{m}),$$
where $b\in R$ is a non-zero scalar and $\varepsilon_{i}=\pm 1$ for every $i\in \{1,\dots,m\}$.
Since $x_{m+1}$ and $x_{p}$ are linearly independent,
$\varepsilon_{i}\ne\varepsilon_{j}$ for some pairs $i,j\le m$.
This guarantees that the characteristic of $R$ is not equal to $2$
and we can assume that
$$P_{p}=\langle x_{p}\rangle,\; \mbox{ where }\;
x_{p}=x_{1}+\dots+x_{q}-x_{q+1}-\dots-x_{m}
$$
and $1\le q<m$.
Now consider a linear automorphism $u\in {\rm GL}(V)$ such that $\pi(u)$ is an extension of the transposition $(P_{q},P_{q+1})$.
Then
$$u(P_{q})=P_{q+1},\;u(P_{q+1})=P_{q}\;\mbox{ and }\;u(P_{i})=P_{i}\;\mbox{ if }\;i\ne q,q+1.$$
By Lemma \ref{lemma1}, we suppose that $u$ leaves fixed $x_{m+1}$.
Since $x_{1},\dots,x_{m}$ are linearly independent,
the equality
$$u(x_{1})+\dots+ u(x_{m})=u(x_{m+1})=x_{m+1}=x_{1}+\dots+ x_{m}$$
implies that
$$u(x_{q})=x_{q+1},\;u(x_{q+1})=x_{q}\;\mbox{ and }\;u(x_{i})=x_{i}\;\mbox{ if }\;i\ne q,q+1,\;i\le m.$$
Then
$$u(x_{p})=u(x_{1})+\dots+u(x_{q})-u(x_{q+1})-\dots-u(x_{m})$$
belongs to $P_{p}$ only in the case when $q=1$ and $m=2$, i.e.
$P_{p}=\langle x_{1}-x_{2}\rangle$ for every $p\ge 4$.
The latter means that ${\mathcal X}$ is the harmonic subset consisting of
$$P_{1}=\langle x_{1} \rangle,\;P_{2}=\langle x_{2} \rangle,\;P_{3}=\langle x_{1}+x_{2}\rangle,
\;P_{4}=\langle x_{1}-x_{2}\rangle.$$
Let $w$ be a linear automorphism of $V$ such that $\pi(w)$ is an extension of the transposition $(P_{1},P_{3})$
and $w(x_{1})=x_{1}+x_{2}$ (see Lemma \ref{lemma1}).
Since $w(P_{2})=P_{2}$ and $w(P_{3})=P_{1}$, we have
$$w(x_{1}+x_{2})=w(x_{1})+w(x_{2})=(x_{1}+x_{2})+cx_{2}\in P_{1}.$$
Then $c=-1$ and $w(x_{2})=-x_{2}$.
The equality $w(P_{4})=P_{4}$ implies that
$$w(x_{1}-x_{2})=w(x_{1})-w(x_{2})=(x_{1}+x_{2})+x_{2}=x_{1}+2x_{2}\in P_{4}.$$
Hence $x_{1}+2x_{2}=x_{1}-x_{2}$ and $2=-1$, i.e. the characteristic of $R$ is equal to $3$.
\end{proof}
Every representation $\alpha: S_{m}\to {\rm GL}(V)$ induces the projective representation
$\pi\alpha:S_{m}\to {\rm PGL}(V)$.
By \cite{Schur}, there exist projective representations of symmetric groups
which are not induced by linear representations
(an explicit realization of such representations can be found in \cite{Nazarov}).
Now we establish an analogue of Corollary \ref{cor1} for projective representations of $S_{m}$.
Let ${\mathcal X}$ be a subset of ${\mathcal P}(V)$ such that
every permutation on ${\mathcal X}$ can be extended to an element of ${\rm PGL}(V)$.
Suppose that $|{\mathcal X}|\ge 2$ and there is no proper subspace of $V$ containing every element of ${\mathcal X}$.
The following example shows that an extension of a permutation on ${\mathcal X}$
to an element of ${\rm PGL}(V)$ is not unique if ${\mathcal X}$ is a maximal independent subset
(an independent subset consisting of $n$ elements).
\begin{exmp}{\rm
Let $x_{1},\dots,x_{n}$ be a base of $V$
and let $a_{1},\dots,a_{n}$ be distinct non-zero scalars.
Consider the linear automorphism of $V$ transferring every $x_{i}$ to $a_{i}x_{i}$.
The associated element of ${\rm PGL}(V)$ is non-trivial, but it induces the identity permutation on the set consisting of
$\langle x_{1}\rangle ,\dots,\langle x_{n}\rangle$.
}\end{exmp}
\begin{prop}\label{prop1}
Let $\{P_{1},\dots,P_{n+1}\}$ and $\{P'_{1},\dots,P'_{n+1}\}$ be $n$-simplices in ${\mathcal P}(V)$.
The following two conditions are equivalent:
\begin{enumerate}
\item[$\bullet$] $R$ is a field,
\item[$\bullet$] there is a unique element of ${\rm PGL}(V)$ transferring every $P_{i}$ to $P'_{i}$.
\end{enumerate}
\end{prop}
\begin{proof}
See \cite[Section III.3]{Baer}.
\end{proof}
If $R$ is a field and ${\mathcal X}=\{P_{1},\dots,P_{n+1}\}$ is an $n$-simplex then, by Proposition \ref{prop1},
for every permutation $s\in S({\mathcal X})$ there is a unique extension $\overline{s}\in {\rm PGL}(V)$.
This correspondence is a monomorphism of $S({\mathcal X})$ to ${\rm PGL}(V)$.
Its image will be denoted by $G({\mathcal X})$.
Note that $G({\mathcal X})=\pi(G(X))$, where $X$ is formed by vectors
$$x_{1}\in P_{1},\dots,x_{n}\in P_{n}\;\mbox{ and }\;-(x_{1}+\dots+x_{n})\in P_{n+1}.$$
Suppose that $R$ is a field of characteristic $3$ and ${\mathcal X}$ is a harmonic subset.
Then every $3$-element subset of ${\mathcal X}$ is a $2$-simplex and
Proposition \ref{prop1} guarantees that
every permutation on ${\mathcal X}$ is uniquely extendable to an element of ${\rm PGL}(V)$.
As above, we get a monomorphism of $S({\mathcal X})$ to ${\rm PGL}(V)$
and denote its image by $G({\mathcal X})$.
\begin{cor}\label{cor2}
Let $R$ be a field and let $G$ be a subgroup of ${\rm PGL}(V)$ isomorphic to $S_{m}$.
Let also ${\mathcal X}$ be an orbit of $G$ such that $G$ acts faithfully on ${\mathcal X}$ and $|{\mathcal X}|=m$
\footnote{As in Corollary \ref{cor1}, if ${\mathcal X}$ is an orbit of $G$ and $G$ acts faithfully on ${\mathcal X}$ then $|{\mathcal X}|\ge m$.}.
Suppose that there are no proper $G$-invariant subspaces of $V$
\footnote{A subspace $S\subset V$ is $G$-invariant if every element of $G$ transfers ${\mathcal P}(S)$ to itself.}.
Then the following assertions are fulfilled:
\begin{enumerate}
\item[$\bullet$] ${\mathcal X}$ is a maximal independent subset or an $n$-simplex or
${\mathcal X}$ is a harmonic subset and the characteristic of $R$ is equal to $3$;
\item[$\bullet$]
if ${\mathcal X}$ is not independent then $G=G({\mathcal X})$.
\end{enumerate}
\end{cor}
The proof is similar to the proof of Corollary \ref{cor1}.
|
{
"timestamp": "2012-06-28T02:04:56",
"yymm": "1206",
"arxiv_id": "1206.6340",
"language": "en",
"url": "https://arxiv.org/abs/1206.6340"
}
|
\section{INTRODUCTION}
\label{s-intro}
It is widely accepted that feedback powered by active galactic nuclei (AGN) has a key role in galaxy formation and
in cooling flows in galaxies and in clusters of galaxies.
In galaxy formation AGN feedback heats and expels gas {{{ from the galaxy}}} (e.g., \citealt{Bower2008, Ostriker2010} and references therein),
and by that can determine the correlation between the central super-massive black hole (SMBH) mass and some
properties of the galaxy {{{ \citep{King2003,King2005,Soker2009b,Soker2011}}}}.
In cooling flow clusters jets launched by the SMBH heat the gas and maintain
a small, but non zero cooling flow (see review by \citealt{McNamara2007, McNamara2012, Fabian2012});
this is termed a moderate cooling flow.
There is a dispute on how the accretion onto the SMBH occurs, in particular in cooling flows.
One camp argues for accretion to be of hot gas via the Bondi accretion process
(e.g., \citealp{Allen2006, Russell2010, Narayan2011}),
while the other side argues that the accretion is of dense and
cold clumps in what is termed the cold feedback mechanism {{{ \citep{Pizzolato2005,Pizzolato2010}}}}.
The cold feedback mechanism has been strengthened recently by observations
of cold gas and by more detailed studies \citep{Revaz2008,Pope2009,Wilman2009,Wilman2011,Nesvadba2011,Cavagnolo2011,Gaspari2012a,Gaspari2012b,
McCourt2012,Sharma2012,Farage2012,Kashi2012}.
The Bondi accretion process, on the other hand, suffers from two problems.
The first problem is that in cooling-flow clusters the Bondi accretion rate
is too low to account for the AGN power
(e.g., \citealt{McNamara2011, Cavagnolo2011}).
The second is that there is no time for the feedback to work \citep{Soker2009}.
{{{ This is because the time for cooling gas at distances of $\ga {\rm few} \times {~\rm kpc}$
in the Bondi accretion process to be accreted and power jets
that heat back the ISM, is much longer than the cooling time of the gas. }}}
This is already true for gas cooling at a moderate distance of $\sim 1 {~\rm kpc}$ from the center.
In other words the gas at large distances has no time to communicate with the SMBH before it cools.
In this paper we point out yet another problematic point with the Bondi accretion process.
In a recent paper, \citet{Wong2011} resolved the region within the Bondi accretion radius of the S0 galaxy NGC~3115.
If the density and temperature profile is interpreted as resulting from a Bondi
accretion flow onto the $M_{\rm BH}=2 \times 10^9 M_\odot$ central SMBH, the derived accretion rate is
$\dot M_B=2.2 \times 10^{-2} M_\odot {~\rm yr}^{-1}$.
They note that for a radiation power of $0.1 \dot{M_B}\, c^2$, the expected accretion luminosity is
six orders of magnitude above the observed upper limit.
They attribute this to a process where most of the inflowing gas is blown away,
{{{ or the gas is continuously circulating in convective eddies,
or that the region they resolve is not yet incorporated into the Bondi accretion flow.
The idea of circulating eddies has some similarities to the density inversion layer behavior we discuss here. }}}
{{{ In any case, some AGN activity does take place in NGC~3115 \citep{Wrobel2012}.
\cite{Wrobel2012} detected a radio nucleus in NGC~3115 with a radio power of $L_{\rm radio}=3 \times 10^{35} {~\rm erg} {~\rm s}^{-1}$.
This indicates the presence of a weak AGN, that might substantially reduce the accretion rate \citep{Wrobel2012}.
As we discuss later, the feeding of the SMBH might be from the stellar winds rather than from the ISM. }}}
{{{ Several other processes were considered to reduce the accretion rate by a SMBH much below the Bondi accretion rate.
Such processes include magnetic field reconnection \citep{Igumenshchev2002}, angular momentum \citep{Proga2003a,Proga2003b},
magneto-thermal instabilities \citep{Sharma2008}, and instabilities due to self-gravitation of the infalling gas \citep{Levine2010}.
Lack of spherical symmetry in realistic situations is an additional factor \citep{Debuhr2011}.
Turbulent media can have higher than Bondi-Hoyle accretion rate, but due to vorticity, a lower
accretion rate is also possible \citep{Krumholz2005,Krumholz2006}.
\cite{Hobbs2012} claim that the Bondi-Hoyle solution is only relevant for hot
virialized gas with no angular momentum and negligible radiative cooling.
}}}
We take a different view
{{{ on the suppression of the Bondi accretion.
We argue that in many galaxies for a fraction of the time the Bondi accretion flow
might not be relevant because }}}
one cannot assume a zero pressure at the center, either because of
stellar winds or because of jets blown by the AGN.
\section{THE PRESSURE OF STELLAR WINDS}
\label{s-ramstars}
The pressure exerted by stellar winds of high velocity stars (i.e., moving much faster than the dispersion velocity in the galaxy)
with an average mass loss rate per star of $\dot m_\ast$ can be calculated in two limits, which basically lead to the same result.
First we calculate the pressure by considering the total outward momentum flux at radius $r$.
{{{ Because the orbital velocities of stars around the SMBH are much larger than the typical
velocities of the stellar winds (as most of the mass loss is during the asymptotic giant branch, AGB, phase), }}}
the relevant velocity in general is not that of the wind relative to the star, but
rather the velocity of the star under the gravitational influence of the SMBH,
\begin{equation}
u_\ast (r) \simeq \sqrt{\frac{G M_{\rm BH}}{r}} = 2 \times 10^3
\left( \frac{M_{\rm BH}}{10^9 M_\odot} \right) ^ {1/2}
\left( \frac{r}{{~\rm pc}} \right) ^ {-1/2} {~\rm km\; s^{-1}}.
\label{eq:vbh1}
\end{equation}
This holds as long as the SMBH gravity dominates that of the galaxy.
In NGC~3115 that we study in more detail in section \ref{s-ngc3115}, for example, the SMBH gravity dominates that
of the galaxy to a distance of $\sim 30 {~\rm pc}$ as the black hole mass is $M_{\rm BH}=2 \times 10^9 M_\odot$.
Let stellar winds from high-velocity stars dominate the pressure inside a sphere of radius $R_h$.
The pressure exerted by the wind on a surface of radius $R_h$ is approximately given by adding the ram pressures of winds from all stars inside the sphere of radius $R_h$,
\begin{equation}
P_{m\ast} (R_h) \simeq n_\ast \eta \dot m_\ast u_\ast (R_h) \frac{4 \pi R_h^3}{3} \frac{1}{4 \pi R_h^2},
\label{eq:ramp1}
\end{equation}
where $n_\ast$ is the stellar {{{ number}}} density in the center of the galaxy, and $\eta$ is the fraction of the mass
lost by stars that is shocked and heats up.
In all our expressions the stellar mass loss rate appears as $\eta \dot m_\ast$.
Some of the mass lost by stars will form dense clumps that will cool rapidly even
if being shocked, or will not even be shocked.
This is particularly true as most of the mass is being lost by AGB stars that have dense winds.
The thermal pressure of the ISM in the center will cause part of the winds' gas to form dense clouds.
{{{ Many of the cold clumps can be evaporated by heat conduction from the hot gas in the bubble.
However, some clumps might flow inward and feed the SMBH, and explain the AGN activity observed by \cite{Wrobel2012}. }}}
The average mass loss rate is calculated as follows. A solar-like star loses $\sim 0.5 M_\odot$ over $\sim 10^{10} {~\rm yr}$.
Considering an old population of stars, the mass loss rate is even lower.
More accurately, most of the mass loss is due to AGB stars, which live for $\sim 10^7 {~\rm yr}$, and lose mass
at an average rate of $\sim 10^{-7} M_\odot {~\rm yr}^{-1}$ \citep{Willson2007}.
During the final stages of the AGB the evolution is faster and the mass loss rate is higher.
If there is a young stellar population, the total mass loss rate can be much higher.
The ram pressure will not increase much beyond few~pc because the stellar density decreases.
An alternative point of view would be to express the pressure as (roughly) the energy density of the shocked stellar wind.
We also assume a constant pressure and density inside this sphere.
{{{ This is justified because we are interested mainly in the outer part of the hot bubble,
where density inversion might take place. Even a steep power law profile, say of $\rho \sim r^{-2}$,
will not change much the density from $0.5 R_h$ to $R_h$, which contains 0.875 of the volume of the bubble. }}}
We can calculate the rate of energy input and multiply by the time it takes the hot gas to leave the inner region
\begin{equation}
\tau_{\rm esc} = \frac{R_h}{\beta u_\ast (R_h)},
\label{eq:pe11}
\end{equation}
where $\beta \la 1$ takes into consideration that the hot gas at the center escapes at velocity
lower than the escape velocity.
The stellar wind pressure in this case can be written as
\begin{equation}
P_{e\ast} = \frac{2}{3} \frac{\dot{E}}{V}\frac{R_h}{\beta u_\ast (R_h)},
\label{eq:pe1}
\end{equation}
where the energy deposition rate is
\begin{equation}
\dot{E}=\int_0^{R_h}{\left(\frac{1}{2} n_\ast \eta \dot{m_\ast} u_\ast^2 (r) \right) 4\pi r^2 dr} =
2 \pi G M_{\rm BH} \eta \dot{m_\ast} \int_0^{R_h} {n_\ast(r) r dr}.
\label{eq:e1}
\end{equation}
Scaling the different quantities and assuming a constant stellar density we find
\begin{equation}
P_{e\ast}
= 3 \times 10^{-8} \beta^{-1} \eta
\left( \frac {M_{\rm BH}} {10^9 M_\odot} \right)^{1/2}
\left( \frac {R_h} {1 {~\rm pc}} \right)^{1/2}
\left( \frac {n_\ast} {5\times 10^5 {~\rm pc}^{-3}} \right)
\left( \frac {\dot m_\ast } {10^{-10} M_\odot {~\rm yr}^{-1} } \right) {~\rm erg} {~\rm cm}^{-3},
\label{eq:pe2}
\end{equation}
where the stellar density is scaled by the average stellar density within $\sim 3 {~\rm pc}$ from
the center of NGC~3115 \citep{Kormendy1996}.
Equations (\ref{eq:pe1}) and (\ref{eq:pe2}) are more accurate than equation (\ref{eq:ramp1}) when the radiative cooling time
of the colliding stellar winds is larger than the escape time $\tau_{\rm esc}$, which is the case here due to the high-temperature low-density
post-shock stellar winds.
{{{ The radiative cooling time is $\tau_c = (5/2) nk T/(n_e n_{p} \Lambda) \simeq 10^7-10^8$ years.
This is much longer than the escape time given in equation (\ref{eq:pe11}), $\tau_{\rm esc} \simeq 10^2-10^3$ years.
Here $n_e$, $n_p$, and $n$ are the electron, proton, and total number density, respectively, and $\Lambda$ is the
cooling function. }}}
Therefore, from now on we will refer to the hot gas region formed by the shocked stellar winds as the hot bubble,
{{{ and to its radius as $R_h$. }}}
For a constant stellar density within radius $r$, we find $P_{e\ast} = \frac{3}{2} \beta^{-1} P_{m\ast}$.
If the stellar density drops to zero at some radius $r_z$ (a nonrealistic ideal case), the pressure beyond $r_z$
will drop like $(r/r_z)^{-2}$.
The average density of the hot shocked stellar wind is given by
\begin{equation}
\rho_w \simeq \left( \frac{4 \pi}{3} R^3_h \right)^{-1} \eta \dot{m_\ast} \frac{R_h}{\beta u_\ast(R_h)}
\int_0^{R_h} 4 \pi {n_\ast(r) r^2 dr}
\label{eq:rho1}
\end{equation}
The flow structure is schematically drawn in Fig. \ref{fig:fig1}.
{{{ Relevant to this flow structure are the simulations of \cite{Cuadra2008}. They simulated the dynamics of stellar
winds in the Galactic center and found the accretion rate to be highly variable, due in part to the stochastic nature of infalling cold clumps.
\cite{Fryer2007} suggest that the inner $\sim 5 {~\rm pc}$ region surrounding Sgr A$^{\ast}$ in our Galaxy
can be approximated by a wind-blown hot bubble density structure.
}}}
\begin{figure}[htb]
\begin{center}
\includegraphics[width=0.75\textwidth]{bondi.eps}
\caption{A schematic drawing (not to scale) of the flow structure where a hot bubble, formed by stellar winds of high-velocity stars orbiting the
central SMBH, exerts pressure on the ISM residing outside radius $R_h$.
If the density in the hot bubble is lower than the ISM density, the flow at $R_h$ is RT-unstable and a density-inversion layer is formed.
{{{ Most clumps that are formed in the winds collision process are later evaporated by heat conduction from the hot bubble to
the clumps. Some, though,
are accreted by the SMBH and explain the weak AGN activity observed by \cite{Wrobel2012}. }}} }
\label{fig:fig1}
\end{center}
\end{figure}
\section{THE CASE OF NGC~3115}
\label{s-ngc3115}
At the Bondi radius $R_B \simeq 210 {~\rm pc}$ of the galaxy NGC~3115 the ISM pressure is
$P(R_B)=2 \times 10^{-11} {~\rm erg} {~\rm cm}^{-3}$, the electron number density is $n_e (R_B)=0.02 {~\rm cm}^{-3}$,
and the temperature is $T(R_B)=3.5 \times 10^6 {~\rm K}$ \citep{Wong2011}.
The Bondi radius is given by
\begin{equation}
R_B \simeq \frac{2 G M_{\rm BH}}{c_s^2} = 220
\left( \frac {M_{\rm BH}} {2 \times 10^9 M_\odot} \right)
\left( \frac {T} {3.5 \times 10^6 {~\rm K} } \right)^{-1} {~\rm pc},
\label{eq:rb1}
\end{equation}
where $c_s$ is the sound speed in the undisturbed gas.
The temperature and electron density increase inward, reaching values of
$T_{20} \simeq 10^7 {~\rm K}$ and $n_{e20} \simeq 0.3 {~\rm cm}^{-3}$ at $r=20 {~\rm pc}$ (\citealt{Wong2011}; no values are given at smaller radii).
We also note that in NGC~3115 the BH gravity dominates that of the galaxy to a distance of $\sim 30 {~\rm pc}$ as the
black hole mass is $M_{\rm BH}=2 \times 10^9 M_\odot$.
The average density and pressure of the hot bubble according to equations (\ref{eq:rho1}) and (\ref{eq:pe2}),
are drawn in Fig. \ref{fig:Pr2} for a SMBH mass of $M_{\rm BH}=2 \times 10^9 M_\odot$, and a stellar density given by
\begin{equation}
n_\ast = 5\times 10^5 {~\rm pc}^{-3}
\begin{cases}
1, & r \le 3 {~\rm pc} \\
(r / 3 {~\rm pc})^{-3}, & r > 3 {~\rm pc},
\end{cases}
\label{eq:nast1}
\end{equation}
and for $\beta=1$ (eq. \ref{eq:pe11}) and $\eta=0.1$ (eq. \ref{eq:ramp1}).
The density within $r = 3 {~\rm pc}$ is from \cite{Kormendy1996}, while at $r > 3 {~\rm pc}$ is our assumption.
{{{ The particular form of the decline in stellar density at $r>3 {~\rm pc}$ has no significant consequences,
and the particular power law was chosen for the sake of simplicity and definite calculations. }}}
The value of the mass loss efficiency, which is the fraction of the mass lost by stars that ends up as hot gas
in the hot bubble, is chosen as $\eta=0.1$ to more or less match the pressure and density of the ISM at $r=20 {~\rm pc}$.
It is a parameter of the model that should be typically in the range of $\sim 0.1-1$.
The temperature that is calculated from the pressure is also drawn on Fig. \ref{fig:Pr2}.
Beyond $\sim 30 {~\rm pc}$ the average temperature is only $\sim 2$ times as large as the virial temperature of the galaxy,
and our assumptions of a hot bubble become inadequate.
\begin{figure}[htb]
\begin{center}
\includegraphics[width=0.75\textwidth]{bubble.eps}
\caption{The average density and pressure of the hot bubble according to equations (\ref{eq:rho1}) and (\ref{eq:pe2}),
as well as the temperature that is calculated from the pressure for the stellar density profile given in equation \ref{eq:nast1}.
The escape velocity parameter is $\beta=1$ (eq. \ref{eq:pe11}), and the mass loss parameter of $\eta=0.1$ (eq. \ref{eq:ramp1}) is taken
to crudely fit the ISM properties of NGC~3115 at $r=20 {~\rm pc}$, {{{ shown in the figure as the horizontal lines}}}.
}
\label{fig:Pr2}
\end{center}
\end{figure}
The following conclusions emerge from Fig. \ref{fig:Pr2}.
(1) The pressure of the shocked stellar winds of the high-velocity circum-SMBH stars is larger than the
ISM pressure near the center, even for a mass loss efficiency of only $\eta \sim 0.1$.
This accounts, we argue, for the accretion rate of NGC~3115 being much lower than the Bondi accretion rate \citep{Wong2011}.
(2) At the center, $r < 3 {~\rm pc}$, the rate of mass loss into the hot gas per unit volume is
$\dot \chi \equiv (n_\ast \eta \dot m_\ast)_c = 5 \times 10^{-6} M_\odot {~\rm pc}^{-3} {~\rm yr}^{-1}$.
Even if this value is ten times lower, a hot bubble with pressure larger than the ISM pressure of NGC~3115 can still be formed.
(3) For $\dot \chi \la 10^{-5} M_\odot {~\rm pc}^{-3} {~\rm yr}^{-1}$ the hot bubble's density is lower than that of the ISM.
This structure is Rayleigh-Taylor (RT) unstable. This structure is analyzed below.
{{{ We note that the structure presented here is a temporary one. Eventually, the gas in the center originated from stellar winds
will radiatively cool and form cold clumps. Some will be accreted and amplify the AGN activity.
Many other clumps will be evaporated by the hot bubble and by the new AGN activity.
Accretion of clumps onto a SMBH in a turbulent medium was studied by \cite{Hobbs2011}, and accretion of cold clumps onto
Sgr A$^{\ast}$ in the Milky Way was simulated by \cite{Cuadra2008}. }}}
\section{A TENUOUS HOT BUBBLE FORMED BY STELLAR OR AGN WINDS}
\label{s-hotbubble}
We found above that in some cases the hot bubble that formed by the stellar winds of circum-SMBH high-velocity stars
can have a lower density than the ISM while its pressure is about equal to the ISM pressure $P_{\rm ISM}$.
This situation is prone to RT instability.
The same might hold for AGN winds.
The power of the winds that is required to form a hot bubble that can support the ISM is
\begin{equation}
W_{\rm wind} \simeq \frac{3}{2}P_{\rm ISM} V \tau_{\rm esc}^{-1}
= 3 \times 10^{37} \beta
\left( \frac{T n_e}{10^7 {~\rm K} {~\rm cm}^{-3}} \right)
\left( \frac {M_{\rm BH}}{10^9 M_\odot} \right)^{1/2}
\left( \frac {R_h} {1 {~\rm pc}} \right)^{3/2} {~\rm erg} {~\rm s}^{-1}
\label{eq:w1},
\end{equation}
where the escape time $\tau_{\rm esc}$ is given by equation (\ref{eq:pe11}), and $V$ is the volume of the hot bubble.
This implies that even a very weak AGN wind can form such a bubble.
With an efficiency of $1 \%$, namely, $W_{\rm wind}=0.01 \dot M_{\rm BH} c^2$, the required accretion rate
is $\dot M_{\rm BH} = 5 \times 10^{-8} M_\odot {~\rm yr}^{-1}$.
For comparison, we note that the Chandra upper limit on the luminosity of NGC~3115 is $ \sim 10^{38} {~\rm erg} {~\rm s}^{-1}$ \citep{Diehl2008},
{{{ and the radio power is $L_{\rm radio}=3 \times 10^{35} {~\rm erg} {~\rm s}^{-1}$ \citep{Wrobel2012}.
The luminosity of the hot bubble as studied here has a low X-ray luminosity compared with the external gas.
First, the volume of the bubble is very small.
Second, the density inside the bubble is lower than that of the surrounding gas, and hence its
emissivity is lower. Even if more of the stellar wind is incorporated into the bubble, the X-ray luminosity
from the bubble is much below detection limits.
}}}
The flow structure considered in this section has the following properties.
The hot bubble is continuously supplied by hot gas from the shocked stellar winds or the AGN wind or jets.
A pressure equilibrium is maintained between the hot bubble and the ISM, and a
structure of a hot tenuous gas supporting a denser and cooler gas is achieved.
This structure is RT unstable.
Such a structure, we claim, is similar to the density inversion found in the outer atmosphere of
red giant stars (e.g., \citealt{Harpaz1984, Freytag2008}), but not identical.
At the outer edge of the recombination zone of hydrogen in red giant stars the convection heat transfer
becomes less efficient.
The requirement to transfer energy leads to a steep temperature gradient that in turn causes a density inversion,
i.e., the density increases outward (e.g., \citealt{Harpaz1984}).
This occurs in the convective region, which is already unstable.
In the density-inversion layer in stars, therefore, cold convective cells fall and hot convective cells
buoy outward.
We suggest that the same process occurs in the flow structure discussed here.
There are some basic differences in the properties of the density-inversion layers of stars and of the case studied here.
The main differences are that the hot gas in our case buoys to large distances, and fresh gas from
stellar wind or the AGN replaces it.
Also, the entire region is optically thin, unlike stars where it is optically thick.
In stars the width of the density-inversion region is determined by heat transfer requirements, whereas in our
case it is determined by dynamics, mixing, and local heat conduction.
In stars the density scale height is not much shorter than the pressure scale height $l_p$.
The size of the convective cells is taken to be of the order of the pressure scale height.
In our case the density can change by an order of magnitude from the inner tenuous region to the denser outer ISM,
and we expect the RT instability to break the cells to smaller cells.
We therefore take the size of the rising and falling gas elements to be $R_c \ll l_p$.
We take the density-inversion zone to be of the order of the pressure scale height (in stars it can be much smaller).
For a central gravity source the pressure scale height for a constant temperature is given by
\begin{equation}
l_p = {R_h} \left[ \frac{C_i}{u_{\ast} (R_h)} \right]^2,
\label{ee:lp1}
\end{equation}
where $C_i$ is {{{ the}}} isothermal sound speed,
{{{ and $u_\ast(R_h)$ is the stellar velocity given in equation (\ref{eq:vbh1}) and evaluated at the radius of the hot bubble $R_h$.
The shocked stellar wind will be heated to a temperature of $T \approx (3/16) m u_{\ast}^2/k$,
where $m$ is the mean mass per particle in the gas. The sound speed is
$[(5/3) kT/m]^{1/2} \approx 0.6 u_{\ast}$. Thus, we can take $l_p \sim R_h$.}}}
Therefore, we assume first that the width of the density-inversion layer is $\Delta r_i \sim R_h$.
Consider then a spherical parcel of gas (a blob) of radius $R_c$ and density $\rho_c$ moving with a terminal
velocity $v_t$ through an external medium of density $\rho_e$.
The buoyancy force on the blob is
\begin{equation}
F_b = \left( \rho_e - \rho_c \right) \frac{4}{3} \pi R_c^3 g,
\label{ee:vt1}
\end{equation}
{{{ where $g$ is the gravitational acceleration.}}} The drag force on the blob is
\begin{equation}
F_d \approx \frac{1}{2} C_D \pi R_c^2 \rho_e v_t^2,
\label{ee:vt2}
\end{equation}
where $C_D \simeq 0.75$ (\citealt{Kaiser2003}).
Assuming $\rho_c \ll \rho_e$ and taking $g = u_\ast^2 / R_h$, the terminal velocity of the blob is
\begin{equation}
v_t \approx \left( {\frac{8}{3 C_D}} \right)^{1/2}
\left( \frac{R_c}{R_h} \right)^{1/2} u_\ast = \beta u_\ast,
\label{ee:vt3}
\end{equation}
where in the second equality we identify the terminal velocity as the velocity by which the hot gas {{{ escapes}}} from
the hot bubble outward, with
\begin{equation}
\beta \simeq 0.6 \left( \frac{R_c}{0.1R_h} \right)^{1/2} .
\label{ee:beta1}
\end{equation}
Complex processes take place in the density-inversion layer.
(1) The heat conduction time scale over a distance of $\Delta r_T = R_c \sim 0.1 {~\rm pc}$ and a temperature difference of $\Delta T=10^7 {~\rm K}$,
is a few$\times 10 {~\rm yr}$. This is shorter than the fall time of a dense clump from $\sim 1 {~\rm pc}$. Therefore, the hot bubble gas heats the
clump by heat conduction. Closer to the center, the clump will be shredded to smaller cells. Hence, before the dense ISM clumps can reach the center
{{{ they}}} will be evaporated. This is not true for denser and cooler blobs that fall inward, as in the cold feedback mechanism \citep{Pizzolato2005}.
(2) Because of the stellar motion and/or AGN activity, the density-inversion layer is expected to be more chaotic than just a RT-unstable region.
There will be vortices that will increase mixing, namely, {{{ reduce}}} the effective value of $\Delta r_T$.
\section{DISCUSSION AND SUMMARY}
\label{s-summary}
We studied the pressure exerted by the winds of circum-SMBH high-velocity stars on the surrounding ISM.
We found that in some cases this pressure is significant and can substantially suppress the inflow of the
ISM relative to what a simple Bondi accretion would give.
Our result can explain the finding of \citet{Wong2011} that the Bondi accretion rate calculated by them from
the ISM density and temperature is six orders of magnitude above the observed upper limit on the accretion rate in the S0 galaxy NGC~3115.
In section \ref{s-ngc3115} we quantitatively examined the situation in the galaxy NGC~3115.
Shocked winds of circum-SMBH high-velocity stars form a bubble of hot gas whose pressure is significant, as evident from
Fig. \ref{fig:Pr2}.
{{{ The colliding winds heat up to very high temperatures, build significant pressure, and are not expected to be accreted by the SMBH
even though they lose angular momentum.
Cooler clumps that fall inward, from the ISM or from inhomogeneities within the hot bubble, will encounter the winds
of fast-moving stars very close to the SMBH. This collision will heat such clumps, suppressing their accretion.
Even if there is a small accretion rate, a very weak disc wind from the accretion disc might
further lower the accretion rate. The study of the interaction of AGN winds with the gas
near the SMBH is a subject of a future study using numerical simulations.
}}}
There are some uncertainties in the model, such as the exact behavior of the stellar mass loss,
trajectories of stars around the SMBH, and the stochastic behavior of the post-shock stellar winds.
Some of these will be studied in future numerical simulations. However, the result that the stellar winds cannot be
ignored is robust.
For some values of the parameters we found that a situation might arise where the hot bubble's density is lower than the ISM density.
In this case, Rayleigh-Taylor (RT) instability takes place, and a density-inversion layer is formed (see schematic description in Fig. \ref{fig:fig1}).
Although hot tenuous gas buoys outward and dense ISM gas moves inward, the density-inversion layer itself continues to exist.
The ISM gas is heated near the center and accumulated {{{ into}}} the hot bubble.
{{{ While the scenario suggested here may explain the low X-ray luminosity observed in the galaxy NGC~3115,
its properties have not yet been observed or affirmed directly.
The size of the hot bubble described is below the resolution limit of the observations and cannot yet be observed.
Alternative explanations for a below-Bondi accretion rate are mentioned in section \ref{s-intro}.
}}}
{{{ We note that in our scenario there can be no steady state over a very long time of $ \sim 10^7-10^8 {~\rm yr}$.
Over this time scale radiative cooling becomes important and more of the cooling gas will be accreted by the SMBH.
This will lead to stronger AGN activity that will heat and expel gas, hence reducing back the accretion rate and AGN power.
In addition, star formation must occur from time to time.
Most likely, there are local star-burst episodes when the accretion rate is much higher than the Bondi accretion rate.
The high accretion rate is probably driven by cold clumps (filaments, streams).
Indeed, the stellar-wind pressure cannot prevent accretion of very dense clouds.}}}
Our result is more general in showing that in many cases the Bondi accretion process does not work because
one of its basic assumptions, that there is no central pressure, breaks down.
This is one of several reasons why the Bondi accretion model may not apply in some cases (see section \ref{s-intro}).
Finally, we note that our model may be relevant for active galaxies where the hot bubble might be formed by the AGN jets or winds.
For typical values of AGN jets and winds the hot bubble density will be low, and a density-inversion layer will be formed.
We expect this process to be of high significance in the process of AGN feedback acting in young galaxies.
Barring Bondi-like accretion, dense and cold clumps in the ISM can still flow inward and feed the SMBH.
Namely, AGN feedback mechanisms require the feeding to be by cold clumps, i.e., a cold feedback mechanism.
{{{ We thank an anonymous referee for many detailed and very helpful comments that substantially improved the manuscript. }}}
This research was supported by the Asher Fund for Space Research and the E. and J. Bishop Research Fund at the Technion,
and the Israel Science Foundation.
|
{
"timestamp": "2013-08-23T02:03:03",
"yymm": "1206",
"arxiv_id": "1206.6029",
"language": "en",
"url": "https://arxiv.org/abs/1206.6029"
}
|
\section{Introduction}
Following initial work by Yang and
coauthors\cite{Yang2000,MoriSanchez2006,Cohen2008,MoriSanchez2009}
on non-interacting ensembles\cite{Perdew1982} with spin-resolved
fractional occupancy, much consideration has been given to the behaviour
of density functional theory (DFT) under the Kohn-Sham (KS)
prescription\cite{HohenbergKohn,*KohnSham}, and its
various common approximations (eg. LDA\cite{KohnSham}, GGA\cite{GGA},
Becke-like\cite{Becke1988}, OEP\cite{OEP1,*OEP2}) in such ensembles.
Many attempts have been made to understand and deal with the issues that
arise in ensembles (see eg. \rcites{Vydrov2007,Cohen2007,Johnson2011}),
with variable success.
We will show that, in such systems, the notion of `correlation' physics
becomes intertwined with `exchange' and `Hartree' physics in
the usual prescription, with (improvable) consequences for
common approximations.
Let us begin by considering, quite generally,
the nature of `electron correlation' and `electron exchange'
in a non-ensemble system. The usual expression for the groundstate
correlation energy can be written as
\begin{equation}
\Ec=
\ibraketop{\Psi}{\hat{H}}{\Psi}-\ibraketop{\Psi^T}{\hat{H}}{\Psi^T}
\label{eqn:Ec}
\end{equation}
where $\hat{H}$ is the Hamiltonian of a many-electron system,
$\iket{\Psi}$ is its groundstate wavefunction, and $\iket{\Psi^T}$ is
some approximation to the wavefunction (by the variational principle,
correlation energy is never positive). Thus correlation
is not an intrinsic property of the system, but a property of the
chosen trial wavefunction. In standard
optimised effective potential (OEP) approaches\cite{OEP1,*OEP2},
including KS DFT, $\iket{\Psi^T}$ takes the form of a single
Hartree-Fock like Slater determinant which is
constructed from one-particle orbitals $\iket{i\sigma}$
evaluated in a \emph{common} one-particle Hamiltonian
$\hat{h}=\th + \Vh$\footnote{we use
atomic units throughout this work such that lengths are in Bohr radii
($1a_0=0.53$\AA) and energies are in Hartree ($1{\rm{Ha}}=4.36$aJ)}
where $\th\equiv-\half\nabla^2$ and $\Vh\equiv V_{\sigma}(\vr)$.
We can now define the exchange energy
$\Ex=\ibraketop{\Psi^T}{\hat{H}}{\Psi^T}-\bar{E}$
and the ``naive Hartree'' energy of the system\footnote{The original
``true Hartree'' theory explicitly excluded orbital
self-interaction, but the ``naive'' form is traditionally used as a
reference in KS DFT}
$\bar{E}=\sum_{i\sigma}\ibraketop{i\sigma}{\th+\Vh_{\Ext}}{i\sigma}
+ \half\int\frac{\d\vr\d\vrp}{|\vr-\vrp|}n(\vr)n(\vrp).$
Here $n(\vr)=\braketop{\Psi^T}{\hat{n}(\vr)}{\Psi^T}
=\sum_{i\sigma}|\phi_{i\sigma}(\vr)|^2$
[where $\hat{n}(\vr)$ is the electron number density operator
and $\phi_{i\sigma}(\vr)=\ibraket{\vr}{i\sigma}$]
and $\Vh_{\Ext}\equiv V_{\Ext}(\vr)$ is the external potential.
The groundstate energy is thus $E=\bar{E}+\Ex+\Ec$ where the
partitioning depends on both the choice of $\bar{E}$ and $\iket{\Psi^T}$.
This can be extended into ensembles by replacing projections on
wavefunctions $O=\ibraketop{\Psi}{\hat{O}}{\Psi}$
by traces on density matrices $O=\tr[\rhoh\hat{O}]$ (where operators
act appropriately for any number of electrons) and
by summing $\bar{E}$ over ensemble members.
The density matrix $\rhoh$ is defined as
\begin{align}
\rhoh=\sum_{\FE}w_{\FE}\iket{\Phi_{\FE}}\ibra{\Phi_{\FE}}
\end{align}
where $0\leq w_{\FE}\leq 1$ is the weight of member $\FE$ with
wavefunction $\iket{\Phi_{\FE}}$ and $\sum_{\FE}w_{\FE}=1$.
Minimisations can then be carried out over $\rhoh$ rather than $\iket{\Phi}$.
\section{Exact exchange approaches}
We can now succinctly define the standard `exact exchange' (EXX)
functional approach. Here we consider only $E^{\EXX}=\bar{E}+\Ex$ with
$\Ec$ assumed to be zero.
Investigations into EXX in fractionally occupied ensemble
systems\cite{MoriSanchez2006,Cohen2009,Makmal2011,Hellgren2012-2}
show both successes and shortcomings (discussed in more detail later).
In all these works, the Hartree and exchange energy takes the
`standard' form, \emph{bilinear} in the occupations $\fsigma_i$:
\begin{align}
E^S_{\Hx}=&\int\frac{\d\vr\d\vrp}{2|\vr-\vrp|}
\sum_{i\sigma j\sigma'}\fsigma_i\fsigmap_j
[P_{i\sigma j\sigma'}-\delta_{\sigma\sigma'}Q_{i\sigma j\sigma'}]
\label{eqn:NEHx}
\end{align}
where $P_{i\sigma j\sigma'}=|\phi_{i\sigma}(\vr)|^2|\phi_{j\sigma'}(\vrp)|^2$
and $Q_{i\sigma j\sigma}=\phi_{i\sigma}(\vr)\phi_{i\sigma}^*(\vrp)
\phi_{j\sigma}^*(\vr)\phi_{j\sigma}(\vrp)$.
Here the negative exchange term cancels
the unphysical positive Hartree interaction of each spin orbital
$\iket{i\sigma}$ with itself. However if two different orbitals
of the same spin are partly occupied
($0<\fsigma_i,\fsigma_j<1$ with $i\neq j$),
or if there is partial occupation of both spins in the same orbital
($0<\fup_i,\fdown_i<1$), there is a
corresponding cross-term in \eqref{eqn:NEHx} that is not cancelled.
In a slightly different context
Gidopoulos \emph{et~al.}\cite{Gidopoulos2002} call this spurious
term the ``ghost interaction'' as it represents an unphysical
interaction between orbitals in
different \emph{non-interacting} ensemble members.
In the regular EXX energy expression \eqref{eqn:NEHx},
the ghost interaction appears in the Hartree and exchange energy terms
involving pairs of orbitals in the frontier orbital.
In a Kohn-Sham interpretation of the equivalent diatom problem,
these interactions would be suppressed in the total energy
via orthogonality of the degenerate groundstate wavefunctions.
However, when one does not have the exact exchange-correlation
functional, or as here neglects correlation, it can reappear,
particularly when one does not properly account for the ensemble
nature of the system.
We will argue that, in the ensemble interpretation of
partial occupation\cite{Yang2000,MoriSanchez2006,Cohen2008},
this cross term should not be present,
and its explicit removal results in an improved linear
exact exchange (LEXX) approach which is correctly piecewise
\emph{linear}, not bilinear, in the occupation factors $f$. Here,
defining $\tsigma_{i\FE}$ to be one for orbital $\iket{i\sigma}$
occupied in ensemble member $\FE$ and zero otherwise,
we exploit the fact that the `ensemble occupancy' factor
$\fsigma_i=\iee{\tsigma_i}\equiv\sum_{\FE}w_{\FE}\tsigma_{i\FE}$
requires weights $w_{\FE}$ that are piecewise linear in $\fsigma_i$,
from which it follows that $\iee{\tsigma_i\tsigmap_j}\equiv
\sum_{\FE}w_{\FE}\tsigma_{i\FE}\tsigmap_{j\FE}$
is similarly piecewise linear. All energy terms are proportional
to $\iee{\tsigma_i}$ or $\iee{\tsigma_i\tsigmap_j}$
and are thus piecewise linear. As will be discussed in more
detail later this is equivalent, under an
exchange approach, to finding a non-interacting ensemble of
Slater determinants formed from a \emph{common} set of orbitals
produced in a \emph{common} potential.
This allows the creation of simple functionals that avoid much
of the ``localization and delocalization error'' of
Yang et~al.\cite{Yang2000,MoriSanchez2006,Cohen2008,MoriSanchez2009},
and the ``many electron self interaction error'' of
Perdew et~al.\cite{Perdew2007}.
In the present work we focus on two illustrative cases:
i) a single partially occupied ``frontier'' orbital with
$0\leq \fup_h \leq 1$ and $0\leq \fdown_h \leq 1$; and
ii) open $p$ shells with $\fup_h=\fdown_h$. However the scheme
itself has wider applicability, including the full dissociation
problem of molecules. Ref.~\onlinecite{Gidopoulos2002} might
be considered another specific example of this approach, while
Ref.~\onlinecite{Balawender2005} outlines a similar approach via HF
for the restricted case of fractional occupation of a single spin
(their 1SSO approach).
\subsection{Non-interacting `exchange' ensembles}
To illustrate the general approach we consider, as an example,
ensembles with total and spin-resolved
electron number $N_t=N+f$ and $N_{t\sigma}=N/2+\fsigma$ ($N$ is even).
The groundstate ensemble members and weights can be found be
minimising over density matrices subject to various constraints.
However for simple cases where energy ordering is obvious,
one can construct the ensemble more intuitively, just by demanding
that a given set of occupations $\fsigma_i$ be reproduced.
For example if the frontier orbital is non-degenerate (eg. in an $s$ shell),
then the ensemble will be composed of up to three components.
For $f\leq 1$, the ensemble is formed from $\fup$ parts an $N+1$
electron system with extra electron in $\up$ (short-hand $N+\up$),
$\fdown$ parts $N+\down$ and
$(1-f)$ parts $N$ where, because $N$ is even, both
spins are filled equally. For $f\geq 1$ the ensemble comprises
$(1-\fdown)$ parts $N+\up$, $(1-\fup)$ parts $N+\down$, and
$(f-1)$ parts $N+2$.
The density matrix is composed
of many-electron wavefunctions $\iket{\Phi_{\FE}}$ and is
\begin{align}
\rhoh^{f}=&\sum_{\FE}w_{\FE}\ket{\Phi_{\FE}}\bra{\Phi_{\FE}}.
\label{eqn:PhiEns}
\end{align}
For the present case of a non-degenerate frontier orbital
$w_{\FE}\in \{ 1-f,\fup,\fdown \}$ and
$\Phi_{\FE}\in \{\Phi_{N},\Phi_{N+\up},\Phi_{N+\down}\}$ for $f\leq 1$
while $w_{\FE}\in \{ 1-\fdown,1-\fup,f-1 \}$ and
$\Phi_{\FE}\in \{\Phi_{N+\up},\Phi_{N+\down},\Phi_{N+2}\}$ for $f> 1$.
This leads to a total energy $E(f)=\tr[\rhoh^f\hat{H}]
=\sum_{\FE}w_{\FE}E[\Phi_{\FE}]$ that obeys
\begin{align}
E(f)=&\begin{cases}
f E_{N+1}+ (1-f) E_{N}, & 0\leq f\leq 1
\\
(f-1) E_{N+2} + (2-f) E_{N+1}, & 1<f \leq 2
\end{cases}
\label{eqn:Ef}
\end{align}
where $E_{N}$ is the energy of an $N$-electron system
(note that $E_{N+\up}=E_{N+\down}\equiv E_{N+1}$).
The LEXX is defined in general by assuming
that the trial density matrix $\rhoh^{fT}$ of the ensemble
obeys the same relationship \eqref{eqn:PhiEns} but with the component
wavefunctions $\iket{\Phi_{\FE}}$ replaced by Hartree-Fock like
determinants $\iket{\Phi_{\FE}^T}$
constructed from a \emph{single} set of spin-dependent
orbitals $\{\iket{i\sigma}\}$. This trial density matrix:
i) reduces to the regular EXX for integer occupation, ii) gives
correct energies for H with less than one electron, split arbitrarily
between spins, and iii) is constructed from a single
set of orbitals $\ket{i\sigma}$ evaluated in a common Hamiltonian,
a requirement that ensures that OEP or KS methods can be used.
Here the orbitals are
eigen-solutions $\hh\iket{i\sigma}=\epsilon_{i\sigma}\iket{i\sigma}$
of a one-body Hamiltonian $\hh=\th + \Vh$.
We sort the orbitals so that $\epsilon_{i\sigma}\leq\epsilon_{j\sigma}$
for $i<j$.
Taking the spin-resolved density
$n_{\sigma}(\vr)=\tr[\rhoh^{fT}\hat{n}_{\sigma}(\vr)]$
one now finds
\begin{align}
n_{\sigma}(\vr)=&\sum_i \iee{\tsigma_i}|\phi_{i\sigma}(\vr)|^2
\equiv\sum_i \fsigma_i|\phi_{i\sigma}(\vr)|^2,
\label{eqn:n}
\end{align}
where typically $\fsigma_i=1$ for the inner orbitals
and $\fsigma_h=\fsigma$ where $\ket{h\sigma}$ is the
frontier orbital in the spin-shell with highest energy:
which may or may not be occupied in both spins.
The EXX approximation ($\Ec=0$) allows us to use only
the Hartree and exchange (Hx) components of the pair-density
$n_{2\Hx\sigma\sigma'}\equiv
\tr[\rhoh^{fT}\hat{n}_{\sigma}(\vr)\hat{n}_{\sigma'}(\vrp)]$
to evaluate the electronic groundstate.
From the properties of HF wavefunctions, the pair-density
of an ensemble can be written as
\begin{align}
n_{2\Hx\sigma\sigma'}\equiv&n_{2\Hrm\sigma\sigma'}+n_{2\xrm\sigma\sigma'}
\nonumber\\
=&\sum_{ij}\iee{\tsigma_i\tsigmap_j}
[P_{i\sigma j\sigma'}-\delta_{\sigma\sigma'}Q_{i\sigma j\sigma}].
\label{eqn:n2HxE}
\end{align}
Finally, we can use \eqref{eqn:n2HxE} to calculate the energy
via
\begin{align}
E^{\LEXX}=&
\sum_{\sigma}\int\d\vr
\lbrs t_{\sigma}(\vr) +n_{\sigma}(\vr)V^{\Ext}(\vr)
\rbrs
\nonumber\\&
+\half\sum_{\sigma\sigma'}
\int\frac{\d\vr\d\vrp}{|\vr-\vrp|} n_{2\Hx\sigma\sigma'}(\vr,\vrp)
\label{eqn:ELEXXE}
\\
\equiv & \sum_{i\sigma}\iee{\tsigma_i}e^{(1)}_{i\sigma}
+ \sum_{i\sigma j\sigma'}\iee{\tsigma_i\tsigmap_j}e^{(2)}_{i\sigma j\sigma'}
\label{eqn:ELEXXp}
\end{align}
where $t_{\sigma}(\vr)=\sum_i\frac{\iee{\tsigma_i}}{2}
|\vnabla\phi_{i\sigma}(\vr)|^2$ and
\begin{align}
e^{(1)}_{i\sigma}=&\int\d\vr \lbrs \half|\vnabla\phi_{i\sigma}|^2
+ V^{\Ext}|\phi_{i\sigma}|^2 \rbrs,
\label{eqn:ELEXXp1}
\\
e^{(2)}_{i\sigma j\sigma'}=&\half\int\frac{\d\vr\d\vrp}{|\vr-\vrp|}
\lbrs P_{i\sigma j\sigma'}-\delta_{\sigma\sigma'}Q_{i\sigma j\sigma} \rbrs.
\label{eqn:ELEXXp2}
\end{align}
These energy expressions are perhaps the most general, and most important
in this work, highlighting the importance of ensemble averages
in the evaluation of average occupation and pair-occupation
factors for groundstate energy calculations.
\subsection{Fractional {$s$} shells}
For the fractionally occupied $s$ shells discussed here,
$\iee{\tsigma_i\tsigmap_j}=\min[\fsigma_i,\fsigmap_j]
-\delta_{ih,jh}\delta_{\sigma\sigmab'}\CU^h$
($\sigmab$ is the opposite spin to $\sigma$ and $\CU^h$ is defined
below). The Hartree and exchange components can be compactly written as
\begin{align}
n_{2\Hrm\sigma\sigma'}=&\sum_{ij} \min[\fsigma_i,\fsigmap_j] P_{i\sigma j\sigma'}
- \delta_{\sigma\sigmab'} \CU^h P_{h\sigma h\sigmab},
\label{eqn:n2H}
\\
n_{2\xrm\sigma\sigma'}=&-\delta_{\sigma\sigma'}
\sum_{ij} \min[\fsigma_i,\fsigma_j] Q_{i\sigma j\sigma}
\label{eqn:n2x}
\end{align}
where we have chosen to split Hartree and exchange
terms via $P$ and $Q$. The term
\begin{align}
\CU^h=\min[\fup,\fdown,(1-\fup),(1-\fdown)]
\label{eqn:CUh}
\end{align}
removes spurious ``ghost interactions'' between electrons
of unlike spin. For a zero to two electron system
equations \eqref{eqn:n2H}-\eqref{eqn:CUh} are equivalent (after integration)
to equation 7 of Ref.~\onlinecite{MoriSanchez2009}
sans the correlation energy term. This desirable outcome is
a direct result of the ensemble averaging.
When either $\fup$ or $\fdown$ is integer, $\CU^h=0$
and $n_{2\Hx\sigma\sigma'}\equiv \sum_{ij}\fsigma_i\fsigmap_j
[P_{i\sigma j\sigma'}-\delta_{\sigma\sigma'}Q_{i\sigma j\sigma'}]$ since
$P_{i\sigma i\sigma}=Q_{i\sigma i\sigma}$. Clearly this is the form
used in \eqref{eqn:NEHx} and thus energies derived from
\eqref{eqn:n2H}-\eqref{eqn:n2x} will be identical.
We can now proffer an explanation for the variable success
of the EXX for fractionally occupied ensembles.
By violating the aufbau principle and/or allowing spins to vary in
an unrestricted fashion, good results have been obtained for
atoms and diatoms\cite{Cohen2009,Makmal2011} and systems with fractional
occupancy\cite{MoriSanchez2006}. In these works only one spin was
allowed to be non-integer so that
$\iee{\tsigma_h\tsigmap_h}=\fsigma_h\fsigmap_h$
and the EXX and LEXX energies were equivalent.
In systems where both spins were fractionally occupied
(eg. Refs.~\onlinecite{Cohen2009} and \onlinecite{Hellgren2012-2})
the EXX failed to reproduce the correct derivative discontinuity.
In these works $\fup=\fdown=f/2$ and
$\iee{\tsigma_h\tsigmap_h}\neq\fsigma_h\fsigmap_h$.
Thus the EXX and LEXX energies differed.
We show later that, in this case, the LEXX is guaranteed to
produce a lower energy.
\subsection{Fractional {$p$} shells}
As a less trivial example, we also consider the case of
degenerate frontier $p$ orbitals with equal densities in each
spin. Here we must sum not only over
ensemble members of different electron number, but also
over the degenerate combinations of $p_x$, $p_y$ and $p_z$
orbitals. For example, in an isolated carbon atom each ensemble
member has fully occupied $1s$ and $2s$ shells, but only
two occupied $2p$ orbitals of the same spin $\sigma$ which we denote
$p_{\gamma}\sigma$ and $p_{\delta}\sigma$ where
$\gamma\neq\delta$ and $\gamma,\delta\in \{x,y,z\}$.
To find the equal-spin, spherically symmetric ensemble
we weight each ensemble member equally so that $w_{p_{\gamma}p_{\delta}\sigma}=\frac16$
for all six combinations of $\gamma\neq\delta$ and $\sigma$.
In member $p_{\gamma}p_{\delta}\sigma$
we set $\tsigma_{2p,p_{\gamma}}=\tsigma_{2p,p_{\delta}}=1$
while the remaining $2p$ orbital with spin $\sigma$, and all $2p$
orbitals with spin $\sigmab$ have zero occupation. Averaging
over all cases gives $\iee{\tsigma_{2p,p_{\gamma}}}=\frac13$
as expected, while
$\iee{\tsigma_{2p,p_{\gamma}}\tsigma_{2p,p_{\gamma}}}=\frac13$,
$\iee{\tsigma_{2p,p_{\gamma}}\tsigma_{2p,p_{\delta}}}=\frac16$
for $\gamma\neq\delta$
and $\iee{\tsigma_{2p,p_{\gamma}}\tsigmab_{2p,p_{\delta}}}=0$.
For general unfilled frontier $p$ shells this yields an additional
like-spin correction of the form $-\CL^{h\sigma}[P-Q]$
to \eqref{eqn:n2H} and \eqref{eqn:n2x}
so that
\begin{align}
n_{2\Hrm\sigma\sigma'}=&\sum_{ij} \min[\fsigma_i,\fsigmap_j] P_{i\sigma j\sigma'}
\nonumber\\&
- \lbr\delta_{\sigma\sigma'} \CL^h - \delta_{\sigma\sigmab'} \CU^h \rbr
\sum_h P_{h\sigma h\sigma'},
\label{eqn:n2Hp}
\\
n_{2\xrm\sigma\sigma'}=&-\delta_{\sigma\sigma'}
\sum_{ij} \min[\fsigma_i,\fsigma_j] Q_{i\sigma j\sigma}
\nonumber\\&
- \delta_{\sigma\sigma'} \CL^h \sum_hQ_{h\sigma h\sigmab},
\label{eqn:n2xp}
\end{align}
where we recognise the degeneracy in the outermost $p$ orbitals
by summing over $h$ with equal weights.
Let us restrict ourselves to the case
$\fup=\fdown=f/2$ where $0\leq f<2$ is the total occupation
(over both spins) of each orbital in the shell. One can sum over
the ensemble to show (after much work)
\begin{align}
\CL^{h\sigma}=&\half\min[f,\allowbreak 2-f,\allowbreak |1-f|,\allowbreak 1/3]
\label{eqn:CLh}
\end{align}
for open $p$ shells. We note that the total number of electrons
in the shell is $N_p=3f$ and \eqref{eqn:n2Hp}-\eqref{eqn:CLh} are
valid for $N_p$ integer or fractional.
The like-spin correction ensures that a bilinear approach would
fail even for systems with one spin fully occupied. Indeed it is
only true for the case $\fup=1$, $\fdown=0$ (or vice versa)
occurring for half-occupied shells in N and P. Here one must not
only allow the spin-symmetry to be broken, but also break the spherical
symmetry to make the bilinear expression \eqref{eqn:NEHx} correct.
\subsection{General ensemble systems}
While we have so far determined our ensembles using explicit
knowledge of the degenerate groundstate, it is possible to
carry out a more general
ensemble minimisation to determine $w_{\FE}$. Here, for
a given potential, we allow ensemble members (determined
by member occupancy factors $\tsigma_{i\FE}$) to sample
all combinations of `occupied' and `unoccupied' orbitals
of the one-electron Hamiltonian,
and minimise the energy with respect to $w_{\FE}$.
In practice we would restrict the allowed ensemble members
to limited combinations predicted to be low in energy.
For example in the $p$ shell case given above, or indeed Be,
we might search for the minimum over cases with full occupancy
in $1s^2$ and varying occupancy in the near-degenerate
$2p$ and $2s$ orbitals.
As shown in equation~\eqref{eqn:ELEXXp},
the general LEXX energy $E^{\LEXX}[\{w_{\FE}\}]$ for a given
ensemble can be written as a sum of the ensemble averaged
occupations $\iee{\tsigma_i}$ and pair-occupations $\iee{\tsigma_i\tsigmap_j}$
with orbital dependent energy prefactors given in
equations~\eqref{eqn:ELEXXp1} and \eqref{eqn:ELEXXp2}.
These averaged occupations depend piecewise linearly on $w_{\FE}$ and thus
$E^{\LEXX}[\{w_{\FE}\}]$ can be minimised under the constraints
$0\leq w_{\FE}\leq 1$ and $\sum_{\FE}w_{\FE}=1$
ie. we look for the (constrained) set of weights minimising
\begin{align}
E^{\LEXX}=&\sum_{\FE}w_{\FE}[
\sum_{i\sigma}\tsigma_{i\FE}e^{(1)}_{i\sigma}
+ \sum_{i\sigma j\sigma'}\tsigma_{i\FE}\tsigmap_{j\FE}e^{(2)}_{i\sigma j\sigma'}
].
\end{align}
We can thus
find, for a given potential and orbitals, the optimal weights
$w_{\FE}$, and through them
$\iee{\tsigma_i}$ and $\iee{\tsigma_i\tsigmap_j}$. For the
true KS potential, this should be equivalent to finding the
temperature$\to 0^+$ limit of finite-temperature DFT.
Such an approach might be useful for dealing with the difficult
atomic dissociation problem.
\section{Optimised effective potentials}
For a many-electron system the EXX (or LEXX) groundstate energy
is composed of the orbital kinetic energy
$T_s=\half\int\d\vr \sum_{i\sigma}\fsigma_i|\nabla\phi_{i\sigma}|^2$,
the energy from the external potential
$E_{\Ext}=\int\d\vr V_{\Ext}n$ and the Hartree plus exchange energy $E_{\Hx}$.
For an ensemble we calculate $E_{\Hx}$ via, for example, the expansion
\eqref{eqn:n2H}-\eqref{eqn:n2x} of $n_{2\Hx\sigma\sigma'}$ for
$s$ shells [or \eqref{eqn:n2Hp}-\eqref{eqn:n2xp} for equi-$p$ shells]
to form the orbital dependent LEXX expression
\begin{align}
E_{\Hx}=&\sum_{\sigma\sigma'}
\int\frac{\d\vr\d\vrp}{2|\vr-\vrp|}n_{2\Hx\sigma\sigma'}(\vr,\vrp),
\label{eqn:EHx}
\end{align}
while for `standard' EXX we instead use \eqref{eqn:NEHx}.
The difference in energies between the LEXX and `standard' EXX
for frontier $s$ shells
is thus the difference between \eqref{eqn:EHx} and \eqref{eqn:NEHx}.
E.g. for the $s$ case
\begin{align}
E^{\LEXX}-E^{\EXX}=E_{\Hx}-E^S_{\Hx}= - \CUt^he_h
\label{eqn:EfEx}
\end{align}
where $E^S_{\Hx}$ is given by \eqref{eqn:NEHx} and
$e_h=\int\frac{\d\vr\d\vrp}{|\vr-\vrp|}P_{h\up h\down}$
and $\CUt^h=\CU^h-\min[\fup,\fdown]+\fup\fdown
=\min[\fup\fdown,(1-\fup)(1-\fdown)]$
governs the unlike-spin correction to the Hartree energy required
when both $\fup$ and $\fdown$ are non-integer. A similar expression
can be derived for the like-spin correction to $p$-shells.
We can now define orbital dependent groundstate energies via
$E^{\EXX}=T_s+E_{\Ext}+E^S_{\Hx}$ for the EXX and
$E^{\LEXX}=E^{\EXX}-\CUt^he_h$ for the LEXX.
In an optimised-effective potential\cite{OEP1,*OEP2} approach, we look for
a potential $V\equiv V_{\orm\sigma}(\vr)$ such that the orbitals
satisfying $[\th+V_{\orm\sigma}]\phi_{i\sigma}=\epsilon_{i\sigma}\phi_{i\sigma}$
minimise the energy. Here we call this approach the
$\OEXX$ or $\OLEXX$ (with an overline to denote use of an optimised
effective potential) depending on the Hx functional used.
Finding $V_{\orm\sigma}$ involves, as input, the functional derivatives
$D_{i\sigma}(\vr)=\delta E_{\Hx}/\delta\phi_{i\sigma}(\vr)$.
Thus the scheme for finding optimised LEXX solutions
differs only from that for the regular EXX in that
$\Dt_{i\sigma}$ for the LEXX includes an extra term for $i=h$.
Via $\CUt^h$, the additional term vanishes whenever $\fup$ or $\fdown$
is integer, as expected (at least for $s$ shells).
Let us consider some of the formal implications of the LEXX.
Firstly, the total energy found in an optimised LEXX scheme
must be bounded below by the EXX energy of the full ensemble.
To prove this we first note that the ensemble EXX energy $E^{\EEXX}$
for an ensemble of positive weights $w_{\FE}$ of elements $\FE$
can be written as
$E^{\EEXX}(f)=\sum_{\FE} w_{\FE} E^{\EXX}_{\FE}[\{\phi^{\FE}_{i\sigma}\}]$
where $[\th+V^{\FE}_{\orm\sigma}]\phi^{\FE}_{i\sigma}=
\epsilon^{\FE}_{i\sigma}\phi^{\FE}_{i\sigma}$ and
$V^{\FE}_{\orm\sigma}$ is chosen to minimise $E^{\EXX}_{\FE}[\{\phi\}]$
and may vary between different ensemble members.
From \eqref{eqn:n}-\eqref{eqn:EHx}, it is clear that
$E^{\LEXX}[\{\phi_{i\sigma}\}]=\sum_{\FE} w_{\FE} E^{\EXX}_{\FE}[\{\phi_{i\sigma}\}]$
where $V_{\orm\sigma}$ in
$[\th+V_{\orm\sigma}]\phi_{i\sigma}=\epsilon_{i\sigma}\phi_{i\sigma}$
can no longer vary separately for each part of the ensemble.
Thus by the variational nature of an OEP we find
$E^{\EXX}_{\FE}[\{\phi^{\FE}_{i\sigma}\}]\leq E^{\EXX}_{\FE}[\{\phi_{i\sigma}\}]$
and $E^{\EEXX}(f)\leq E^{\OLEXX}$.
Secondly, we see that $E^{\LEXX}[\{\phi\}]\leq E^{\EXX}[\{\phi\}]$
for any set of orbitals $\{\phi\}$ and thus
$E^{\LEXX}[\LEXX]\leq E^{\LEXX}[\EXX]\leq E^{\EXX}[\EXX]$
(where the term in the square brackets labels the OEP
used to evaluate the orbitals)
with the equality holding (for $s$ shells) only when
$\CUt^h=0$ (i.e.\ when each of the spins is integer occupied).
The former inequality follows from
\eqref{eqn:EfEx} by noting that $\CUt^h\geq 0$ and
$e_h=
\int\frac{\d\vr\d\vrp}{|\vr-\vrp|}P_{h\up h\down}\geq 0$
as $P_{h\up h\down}\geq 0$ (similarly for the like spin term) and the latter
follows from the minimisation principle of OEPs.
Putting the OEP inequalities together, we find
\begin{align}
E^{\EEXX}\leq& E^{\OLEXX} \leq E^{\OEXX}
\label{eqn:Ineq}
\end{align}
where we include the overline (indicating an optimised potential was used)
for clarity.
\section{Correlation energies}
The consequences of the improved pair-densities also extend
beyond exchange physics. Some beyond-dRPA correlation energy methods
[see \rcite{Eshuis2012} for an overview]
like the RPAx\cite{RPAx}, RXH\cite{Gould2012-RXH} and PGG\cite{PGG}
kernels, ISTLS\cite{ISTLS,*Gould2012-2}
and tdEXX\cite{Hellgren2008,*Hesselmann2010} depend in some way on the
groundstate pair-density. The difference between the EXX and LEXX
expressions will therefore manifest in \emph{correlation} energies too.
Here we can calculate the correlation energy via the ``ACFD''
functional (see e.g. Ref.~\onlinecite{Eshuis2012})
involving the orbital-dependent linear response function $\chi_0$,
and ``xc kernel'' $f_{\xc}$.
By way of example, the ``PGG''\cite{PGG} kernel directly uses
the pair-density to approximate
\begin{align}
f_{\xc\sigma\sigma'}(\vr,\vrp)\aeq&
\frac{1}{|\vr-\vrp|}
\lbr
\frac{n_{2\Hx\sigma\sigma'}(\vr,\vrp)}{n_{\sigma}(\vr)n_{\sigma'}(\vrp)}
-1 \rbr.
\end{align}
It thus captures
the ensemble physics at both the LEXX and correlation levels via
$n_{2\Hx}$.
\section{Results}
\begin{figure}[thb]
\caption{Groundstate energy differences
$E(\fup,\fdown)-E^{\OLEXX}(\half,\half)$ (Ha)
of H, Li and Na ions with fractional occupations
under EXX (left) and LEXX (right).\label{fig:Enf}}
\begin{tabular}{ll}
\includegraphics[width=0.42\linewidth]{EnPlotBH}
& \includegraphics[width=0.42\linewidth]{EnPlotLH}
\\
\includegraphics[width=0.42\linewidth]{EnPlotBLi}
& \includegraphics[width=0.42\linewidth]{EnPlotLLi}
\\
\includegraphics[width=0.42\linewidth]{EnPlotBNa}
& \includegraphics[width=0.42\linewidth]{EnPlotLNa}
\\
\end{tabular}
\end{figure}
In Figure~\ref{fig:Enf} we show correlation-free energies for
H, Li and Na-like fractional ions calculated in the optimised EXX and
LEXX schemes under the Krieger, Li and Iafrate\cite{KLI1992} (KLI)
approximation to the potential in a real space code for spherically
symmetric systems. Results are presented for
$\fup$ and $\fdown$ ranging from zero to one
such that $f$ ranges from zero (e.g. Na${}^+$) to two (e.g. Na${}^-$).
The true ensemble EXX energy $E^{\EEXX}$ takes the same,
piecewise linear form as \eqref{eqn:Ef}
but with groundstate energies $E_N$ of the ensemble members
(for integer $N$) replaced by EXX energies $E_N^{\EXX}$
from the optimal Slater determinant.
The sides of the surface plots show the case where one electron
is integer and the other fractional (or integer at the corners)
and it is clear that the results for the optimised EXX
and LEXX are identical as expected. In the interior,
however, a different picture emerges, with the required derivative
discontinuities at $\fup+\fdown=1$ being absent in the EXX
but clearly present in the LEXX.
The LEXX also varies minimally with $f=\fup+\fdown$ fixed
(along diagonals perpendicular to the projection), unlike the EXX.
The slight remaining non-linearity must be explained via the
implicit dependence
of the orbitals on $f$ as the energy formula is explicitly linear
in $f$. We are unsure if this is a result of the optimised effective
potential approach itself, or the KLI approximation thereto.
The LEXX clearly offers dramatic improvements over the EXX in energy
calculations. For Li and Na it also makes a good approximation
to the true EEXX energy without resorting to correlation physics.
Here the maximum variation from EEXX is at most 6mHa for Li and Na,
significantly smaller than the correlation energies of 45mHa and 396mHa
respectively\cite{Chakravorty1993} for the neutral atoms. Only
for H, where the orbitals of H and H${}^-$ differ significantly through
space, is the difference significant, growing to almost 20mHa
for $f\aeq 1.5$, comparable to the H${}^-$ correlation energy of 42mHa.
The LEXX was previously used\cite{Gould2012-RXH} to generate
groundstates for correlation energy calculations. We are thus
able to compare the correlation-free LEXX results from that
work with benchmark HF energies calculated by
Chakravorty \emph{et~al.}\cite{Chakravorty1993}.
To test the validity of the $p$ shell LEXX expression (using
equations~\eqref{eqn:n2Hp}-\eqref{eqn:CLh}) we compared the energies
of the first and second row open $p$ shell atoms B-F and Al-Cl as
these have integer electron numbers, but \emph{fractional} $f$.
For these atoms the LEXX energy has a maximum error of $<1.5$mHa (for O)
and a mean absolute error of just $0.6$mHa. To numerical accuracy
in our calculations this is close to exact agreement, and
justifies both the LEXX itself and the KLI
approximation to the OEP, at least for integer electron number.
\begin{figure}
\caption{Groundstate energy $E(N)$ of C and F ions
under the LEXX approach with and without correlation
energy included. Electrons are split equally between up and down spin
$N_{\up}=N_{\down}=N/2$.\label{fig:EnC}}
\includegraphics[width=1.00\linewidth]{EnPlotCarbon}
\\
\includegraphics[width=1.00\linewidth]{EnPlotFluorine}
\end{figure}
In Figure~\ref{fig:EnC}
we show the energy of carbon and fluorine ions with
five/eight to seven/ten electrons. For illustrative purposes
we show results with (RPA, PGG, exact) and
without (LEXX, EEXX) correlation energies
evaluated in the ``ACFD'' functional (see e.g. Ref.~\onlinecite{Eshuis2012}).
The LEXX is used for the kinetic, external, Hartree and exchange
energies in all calculations bar EEXX and exact.
Correlation energies are evaluated using the random-phase approximation (RPA)
and PGG kernel (see Ref.~\onlinecite{Gould2012-RXH} for technical details).
The exact groundstate energy of fractional ions of C is given by the
piecewise linear function
$E(N)=E_0^{\rm{C}}-(N-6)I^{\rm{C}}$ for $5\leq N\leq 6$ and
$E(N)=E_0^{\rm{C}}-(N-6)A^{\rm{C}}$ for $6<N\leq 7$ where $E_0^{\rm{C}}$ is the
groundstate energy of carbon, $I^{\rm{C}}$ is its ionisation potential
and $A^{\rm{C}}$ its electron affinity (with similar expression for F).
Energies and ionisation potentials are taken from
Ref.~\onlinecite{Chakravorty1993} and affinities from
Refs.~\onlinecite{Aff4,Aff6}.
The EEXX energy is defined in the same way but with $E_0$, $I$ and $A$
replaced by correlation-free EXX values.
Clearly the LEXX without correlation approximates the
piecewise linear form, albeit
incorrectly predicting negative fractional affinities for
$N\lesssim 6.75$ for C and $N\lesssim 9.60$ for F.
Including correlation improves things, although even here there is
a small range with negative affinities,
at $N\lesssim 6.25$ for C with the RPA and PGG kernels,
and $N\lesssim 9.25$ for F with the PGG kernel. It is clear that
the ``LEXX-PGG'' (PGG evaluated with an LEXX pair-density)
is a fairly good approximation to the groundstate ensemble
energy at all fractions in both cases, especially for the positive ions.
The derivative discontinuity
shown here comes entirely from our correct treatment of Hx in most
cases, with a nonzero but very small extra contribution
from correlation in the PGG case.
We aim to further investigate
correlation energies at fractional occupation in future work.
\section{Conclusions and further work}
While the discussion here has focused on Fermionic systems with
non-degenerate frontier orbitals and ensembles constructed
around varying electron number, the general approach holds true
for any non-interacting ensemble system.
For example in Bosonic systems, orbital SI is not cancelled by
exchange terms even for integer occupation, a situation which
favours the present type of analysis of the ``Hartree'' and ``exchange''
terms. Other interesting cases include finite distance dissociation,
where quantum superpositions of determinants are required as well
as classical ensembles; and thermal ensembles.
LEXX physics is also useful beyond the OEP LEXX method discussed here.
It should be possible to construct
local density functionals (like the LSDA) from pseudo-densities
based on the modified exchange and/or Hartree pair-density
via an approach like that of Ref. \onlinecite{Gidopoulos2012}
or Ref. \onlinecite{Gaiduk2012}.
This perhaps provides some further justification for the success of
recent work by Johnson and Contreras-Garc{\'\i}a\cite{Johnson2011}.
The LEXX may also have potential uses in
$\fancy{O}(N)$-scaling DFT approaches (see \rcite{ONReview}
for a recent review).
By constructing a density matrix with similar properties to the exact
ensemble, we were able to develop an LEXX formalism yielding
an orbital-dependent total energy \eqref{eqn:ELEXXE} via a
pair-density, piecewise \emph{linear} in the occupation factors,
and involving ensemble averages of the one $\iee{\tsigma_i}$
and two $\iee{\tsigma_i\tsigmap_j}$ orbital pair factors
[see \eqref{eqn:ELEXXp}-\eqref{eqn:ELEXXp2}].
This is exemplified for doubly fractional $s$ shells
in \eqref{eqn:n2H}-\eqref{eqn:n2x} with ghost-interactions suppressed
by the correction term \eqref{eqn:CUh} and with similar expressions
for $p$ shells discussed in \eqref{eqn:n2Hp}-\eqref{eqn:n2xp} with
additional like-spin correction term \eqref{eqn:CLh}.
Using these energy expressions in the OEP LEXX functional
proposed here gives clearly improved results
(with $E^{\EEXX}\leq E^{\OLEXX}\leq E^{\OEXX}$)
when compared with the more common form of EXX, without resorting
to correlation physics.
This suggests that the very notion of electron correlation is
imprecisely defined for OEP or KS systems with fractional occupancy.
Using the properties of ensembles to create better trial wavefunctions
and density matrices can be an excellent means of reducing the workload
of the correlation functional in such systems.
\acknowledgments
The authors were supported by ARC Discovery Grant DP1096240. We would
like to thank Maria Hellgren, E. K. U. Gross and J. P. Perdew
for helpful discussion.
\section*{References}
|
{
"timestamp": "2013-01-09T02:00:17",
"yymm": "1206",
"arxiv_id": "1206.6158",
"language": "en",
"url": "https://arxiv.org/abs/1206.6158"
}
|
\section{Introduction}
In zero-dimensional nonlinear systems noise may induce a wide spectrum of important phenomena such as stochastic resonance \cite{gammaitoni},
coherence-resonance \cite{lucafrancesco} and noise-induced transitions \cite{hl,wiolindenberg,lucafrancesco}. Noise-induced transitions (also called phenomenological stochastic bifurcations) consist in qualitative changes of the statistical properties of a stochastic system, characterized by transitions from unimodality to bimodality of the stationary probability densities of state variables, and similar phenomena. Note that noise-induced transitions are well-distinct from phase transitions that need spatially extended systems \cite{wiolindenberg}. Genuine noise-induced phase transitions have been, instead and not surprisingly, found in many spatiotemporal dynamical systems \cite{Ibanhes,GObook,sagues}.
Many studies in the field of noise-induced phenomena in both zero-dimensional and in spatially extended systems were, respectively, based on temporal \cite{hl} or spatiotemporal white noises \cite{sagues, Wang1,Wang2,Wang3}. This important model of noise is, however, mainly appropriate when modeling internal "hidden" degrees of freedom, of microscopic nature. On the contrary, extrinsic fluctuations (i.e. originating externally to the system in study) may exhibit both temporal and spatial structures \cite{GObook,sanchoPhysD}, which may induce new effects. For example, it was shown that zero dimensional systems perturbed by colored noises exhibit correlation-dependent properties that are missing in case of null autocorrelation time, such as the emergence of stochastic resonance also for linear systems, and re-entrance phenomena, i.e. transitions from monostability to bistability and back to monostability \cite{wiolindenberg,hanggi,lucafrancesco}. Even more striking effects are observed in spatially extended systems that are perturbed by spatially white but temporally colored noises. These phenomena are induced by a complex interplay between noise intensity, spatial coupling and autocorrelation time \cite{wiolindenberg}.
Garc\'ia-Ojalvo, Sancho and Ram\'irez-Piscina introduced in \cite{GO92} the spatial version of the Ornstein-Uhlenbeck noise, which we shall call GSR noise, characterized by both a temporal scale $\tau$ and a spatial scale $\lambda$ \cite{lam}. The Ginzburg-Landau field model - one of the best-studied amplitude equation representing 'universal' nonlinear mechanisms - additively perturbed by the GSR noise was investigated in \cite{GO94,GObook}, where
it was shown the existence of a non-equilibrium phase transition controlled by both the correlation time and the correlation length \cite{GO94,GObook}.
The above-summarized body of research is essentially based on the use of Gaussian Noises (GNs), which is the best approximation of reality in many cases.
However, an increasing amount of experimental data shows that many real-life stochastic processes do not follow white or colored Gaussian laws, but other probability densities (such as “fat-tail” power-laws \cite{New05}). More recently, theoretical research focused on another important class of non-Gaussian stochastic processes: the bounded noises.
Probably the most studied bounded noise is the Dichotomous Markov Noise (DMN)\cite{lucafrancesco}. In the last twenty years, other classes of bounded noises were defined and intensively studied in statistical physics \cite{wioII,CaiLin,bobryk} and in engineering \cite{dimentberg}, and - to a lesser degree - in mathematics \cite{Homburg} and quantitative biology \cite{pre,dongan}.
The rise of scientific interest in bounded noises is motivated by the fact that in many applications both GNs and “fat-tailed” non-Gaussian stochastic processes are an inadequate mathematical model of the physical world because of their infinite domain. This should preclude their use to model stochastic fluctuations affecting parameters of dynamical systems, which must be bounded by physical constraints \cite{wioII,bobryk,pre}. Moreover, in biology and elsewhere, some parameters must also be strictly positive. As a consequence, not taking into account the bounded nature of stochastic fluctuations may lead to unrealistic inferences. For instance, when the onset of noise-induced transitions depends on exceeding a threshold by the variance of a GN, this often means making a parameter negative or excessively large \cite{wioII,bobryk,dongan,pre}. To give an example taken from medicine, a GN-based modeling of the unavoidable fluctuations affecting the pharmacokinetics of an antitumor drug delivered by means of continuous infusion leads to a paradox. Indeed, the probability that the drug increases the number of tumor cells may become nonzero, which is absurd \cite{dongan,pre}. Thus, in order to avoid these problems, the stochastic models should in these cases be built on bounded noises.
In order to generate a temporal bounded noise, two basic recipes have been adopted so far. The first consists in generating the noise by means of an appropriate stochastic differential equation \cite{wioII,CaiLin}, whereas the second one consists in applying a bounded function to a standard Wiener process. In the purely temporal setting, two relevant examples of noises obtained by implementing the first recipe are the Tsallis-Borland \cite{wioII} and the Cai-Lin \cite{CaiLin} noises, whereas an example generated by following the second recipe is the zero-dimensional sine-Wiener noise \cite{bobryk}.
Recently, in \cite{deFradOnpre} we introduced and numerically studied two spatiotemporal extensions of the above-mentioned Tsallis-Borland and Cai-Lin noises. In that work we applied - as an additive perturbation - these bounded noises to a Ginzburg-Landau (GL) model and pointed out the dependence of the phase transition phenomena on both spatial and temporal correlation strength, as well as on the specific model of noise that has been adopted.
Our aim here is threefold. First, by adopting the 'second recipe' we want to define and numerically investigate a new simple spatiotemporal bounded noise, which extends both the zero-dimensional sine-Wiener bounded noise \cite{bobryk}, and the spatiotemporal unbounded GSR noises \cite{GO92,GObook}.
Second, we want to assess the effects of such bounded stochastic forces (i.e. of additive bounded noises) and of initial conditions on the statistical properties of the spatiotemporal dynamics of the Ginzburg-Landau (GL) equation.
Third we explore the similar and different features of the spatiotemporal sine-Wiener noise perturbation with respect to those of the Cai-Lin and Tsallis-Borland spatiotemporal bounded noises studied in \cite{deFradOnpre}.
Phase transitions induced in the GL model by additive and multiplicative unbounded noises were extensively studied in the last twenty years \cite{GObook,GO92pla,GO92,gpsv,various,ms,jstat,ss,lucafrancesco,ouch}. It follows that we shall mainly focus on the effects more strictly related to the boundedness of the noise in study. In particular, we will compare the response of the GL system to SW noise with the one to GSR noise.
\section{Spatiotemporal colored unbounded noise}
Let us consider the well-known zero-dimensional Ornstein-Uhlenbeck stochastic differential equation:
\begin{equation}\label{oue}
\xi^{\prime} (t)= -\frac{1}{\tau}\xi(t) + \frac{\sqrt {2 D} }{\tau}\eta(t),
\end{equation}
where $\tau$ is the typical temporal correlation, $\sqrt {2 D}$ is the noise strength and $\eta(t)$ is a Gaussian white noise of unitary intensity:
\begin{equation}
\langle \eta(t)\eta(t_1)\rangle= \delta(t-t_1).
\end{equation}
It is well-known that the solution of eq. (\ref{oue}) is a Gaussian colored stochastic process with autocorrelation:
\begin{equation}
\langle \xi(t)\xi(t_1)\rangle \propto \exp\left(-\frac{|t-t_1|}{\tau}\right).
\end{equation}
In \cite{GO92} eq. (\ref{oue})
was generalized in a spatially extended setting by including in it the most known and simple spatial coupling, the Laplace operator, yielding the following partial differential Langevin equation
\begin{equation}\label {gener}
\partial_t \xi (x,t)= \frac{\lambda^2}{2 \tau}\nabla^2 \xi(x,t) -\frac{1}{\tau}\xi(x,t) + \frac{\sqrt {2 D} }{\tau}\eta(x,t),
\end{equation}
where $\lambda>0$ is the spatial correlation strength \cite{GO92} of $\xi (x,t)$.\\
As usual in non-equilibrium statistical physics, we shall investigate the lattice version of (\ref{gener}):
\begin{equation}\label {generlattice}
\xi_p^{\prime} (t)= \frac{\lambda^2}{2 \tau}\nabla_L^2 \xi_p(t) -\frac{1}{\tau}\xi_p(t) + \frac{\sqrt {2 D} }{\tau}\eta_p(t),
\end{equation}
where $ p = h \ (i,j)$ is a point on a $N\times N$ lattice with steps equal to $h$. The symbol $\nabla_L^2$ denotes the discrete version of the Laplace operator:
\begin{equation}\label {lapllatt}
\nabla_L^2 \xi_p (t)= \frac{1}{h^2}\sum_{i \in ne(p)}(\phi_i-\phi_p),
\end{equation}
where $ne(p)$ is the set of the neighbors of the lattice point $p$. The Weiss mean field method \cite{parrondo} applied to eq. (\ref{generlattice}) yields that for $N\gg 1$ the one-site distribution of the GSR noise is $P_{GSR}(\xi) = C \exp(-\xi^2/(2 \sigma_{GSR}^2))$, where
\begin{equation}\label{GSRsigma}
\sigma_{GSR}^2 = \frac{D}{\tau_c(1+2 \lambda^2)}
\end{equation}
\section{The sine-Wiener spatiotemporal bounded noise: definition and properties}\label{SWnoise}
The sine-Wiener noise is obtained by applying the bounded function $h(u) = B \sin(\sqrt{2/\tau}u)$ to a random walk $W(t)$ defined as $W'= \eta(t)$, where $\eta(t)$ is a white noise of unitary intensity, yielding:
\begin{equation}
\zeta(t)= B \sin\left( \sqrt{ \frac{2}{\tau}}W(t) \right).
\end{equation}
The stationary probability density of $\zeta(t)$ is given by
\begin{equation}
P_{eq}(\zeta) = \frac{1}{\pi \sqrt{B^2 -\zeta^2}},
\end{equation}
thus $P_{eq}(\pm B)=+\infty$. Thanks to this property, one may consider the sine-Wiener noise as a realistic extension of the Markov dichotomous noise, whose stationary density is $P_{eq}(\zeta) = \frac{1}{2}\left[\delta(\zeta-B)+\delta(\zeta+B)\right]$.
Here, as a natural spatial extension of the sine-Wiener noise, we define the following spatiotemporal noise:
\begin{equation}\label{XXX}
\zeta(x,t)= B \sin\left( 2 \pi \xi(x,t) \right),
\end{equation}
where $\xi(x,t)$ is the spatiotemporal correlated noise defined by (\ref{gener}).
If the number of lattice sites is sufficiently large, we may study the global behavior of the spatiotemporal noise by means of the equilibrium heuristic probability density of the noise lattice variables $\zeta_p$, $P_{eq}(\zeta)$.
We observed that when varying the spatial coupling parameter $\lambda$ of the underlying GSR noise, the distribution of $\zeta(x,t)$ exhibits at $\lambda = \lambda^* \approx 4 $ a stochastic bifurcation (see figure \ref{fig_P_eq}.a): for $0 \le \lambda < \lambda^*$ the distribution is bimodal, whereas for $\lambda>\lambda^*$ the distribution is trimodal, since an additional mode at $\zeta = 0$ appears. Similar bifurcations are observed if varying $D$ (see figure \ref{fig_P_eq}.b) or $\tau$ (although, in this case, the bifurcation value is very large).
These behaviors may be heuristically explained by the one-site distribution of the underlying GSR noise $\xi$. Indeed, defining the 'span' of the GSR noise as
\begin{equation}\label{sp}
S = 2 \sigma_{GSR} = 2 \sqrt{\frac{D}{\tau_c(1+2 \lambda^2)}}
\end{equation}
yields that $S$ increases with $D$, and decreases both with $\tau_c$ and $\lambda$. Thus both the above-mentioned numerically observed phenomena may be explained.
To start, note that for small $\lambda$ it is $S \approx \sqrt{2 D/\tau_c}$. Thus if $S$ is sufficiently large, the horn-shaped distribution is observed, due to the large span of the argument of the sine, which remains roughly constant (provided that $\lambda$ is such that $2 \lambda^2\ll 1$).
On the contrary, for large $\lambda$, $S$ becomes small, and the argument of the sine remains prevalently small, thereby causing the onset of a central new mode.
\begin{figure}
\begin{center}
\subfigure[]
{
\label{A}
\includegraphics[width=0.48\textwidth]{Fig1a.eps}
}
\subfigure[]
{
\label{B}
\includegraphics[width=0.48\textwidth]{Fig1b.eps}
}
\end{center}
\caption{Equilibrium distribution $P_{eq}(\zeta_p)$ of the sine-Wiener bounded spatiotemporal noise, on a $40\times40$ lattice system with $B=1$. Panel \subref{A}: stochastic bifurcation induced by varying the parameter $\lambda$, with $\tau_c=2$ and $\sqrt{2D} =1$. Panel \subref{B}: stochastic bifurcation induced by varying the parameter $\sqrt{2D}$, with $\tau_c=2$ and $\lambda=0$.}
\label{fig_P_eq}
\end{figure}
\section{The Ginzburg-Landau equation perturbed by additive sine-Wiener noise}
Let us consider the following bidimensional lattice-based Ginzburg-Landau equation:
\begin{equation}\label {GLequation}
\partial_t \psi_p= \frac{1}{2}\left(\psi_p-\psi_p^{3}+\nabla_L^2 \psi_p\right) +A_p(t),
\end{equation}
where $A_p(t)$ is a generic bounded or unbounded additive noise. In \cite{GObook} Garc\'ia-Ojalvo \textit{et al.} studied the eq. (\ref{GLequation}) under the assumption that $A_p(t)= \xi_p(t)$, where $\xi_p(t)$ is the GSR noise defined by eq. (\ref{generlattice}). They showed that both spatial and temporal correlation parameters (respectively, $\lambda$ and $\tau$) shift the transition point towards larger values.
In the following we will illustrate some analytical and numerical results for the case $A_p(t) = \zeta_p(t)$, where $\zeta_p(t)$ is the bounded sine-Wiener noise defined by eq. (\ref{XXX}) and computed at the lattice sites. We stress here that our aim is to provide a solid testbed to the novel type of spatiotemporal bounded noise here defined, and not to evidence some unknown aspects of the GL model, which is one of most important and studied models of statistical mechanics.
In line with \cite{GObook}, phase transitions in GL equation will be characterized by means of the order parameter 'global magnetization', i.e.:
\begin{equation}
M\equiv\frac{\langle|\sum_{p}\psi_p|\rangle}{N^{2}},
\end{equation}
and of its relative fluctuation $\sigma_M$ \cite{GObook}:
\begin{equation}
\sigma_M\equiv \sqrt{\frac{\langle|\sum_{p}\psi_p|^{2}\rangle-\langle|\sum_{p}\psi_p|\rangle^{2}}{N^{2}} }.
\end{equation}
Again in line with \cite{GObook}, we define a transition from large to small values of the order parameter as an 'order to disorder' transition. However, by no means do we state the equivalences 'disorder = randomness' and 'order = homogeneity'.
All simulations have been performed in a $40\times40$ lattice for a time interval $[0, 250]$, and the temporal averages were computed in the interval $[125,250]$. In all cases, noise initial condition was set to $0$.
We will mainly focus on the case where the initial state is $\psi(x,0)=1$ for all $x$.
\subsection{Some analytical considerations on the role of $B$}
Lattice-based system (\ref{GLequation}) is endowed with an important mathematical property. Indeed, it is a cooperative system \cite{coppel} since:
\begin{equation} \partial_{\psi_k}\psi_p^{\prime} \ge 0. \end{equation}
This property and the fact that $A_p(t) \ge -B$ imply that: \begin{equation} \psi_p(t)\ge \widetilde{\psi}_p(t), \end{equation}
where
\begin{equation} \partial_t \widetilde{\psi}_p= \frac{1}{2}\left(\widetilde{\psi}_p-\widetilde{\psi}_p^{3}+\nabla_L^2 \widetilde{\psi}_p\right) -B \end{equation}
with $\widetilde{\psi}_p(0)=\psi_p(0)$.
Now, note that if $0<B<B^* =1/(3 \sqrt{3})$ then the equation
\begin{equation} s-s^3 = 2 B \end{equation}
has three solutions $s^a(B)<0$, $s^b(B)\in (0,1)$ and $s^c(B)\in (0,1)$ such that $s^b(B)<s^c(B)$. For example, for $B=0.19<B^*$ it is: $s^a(0.19)\approx-1.15306$, $s^b(0.19)\approx 0.52331$ and $s^c(0.19)=0.62975$. In particular, if $B\ll 1$ then it is $s^c(B)\approx 1-B$ and $s^a(B)\approx -1-B$. It is an easy matter to show that if $\widetilde{\psi}_p(0)>s^b(B)$ then $\widetilde{\psi}_p(t)>s^b(B)$, also implying $\psi_p(t)>s^b(B)$ and of course that $M(t)> s^b(B)$ and $M_s(t)> s^b(B)$. Indeed, suppose that at a given time instant $t_1$ all $\widetilde{\psi}_p(t_1) \ge s^b(B)$, except at a point $q$ where $\widetilde{\psi}_q(t_1)=s^b(B)$. Thus, it is
\begin{equation} \partial_t \widetilde{\psi}_q(t_1)= \frac{1}{2}\left(\widetilde{\psi}_q-\widetilde{\psi}_q^{3}+\nabla_L^2 \widetilde{\psi}_q\right) -B = 0 + \frac{1}{2} \nabla_L^2 \widetilde{\psi}_q \ge 0. \end{equation}
Note that the vector $c(B)= s^c(B) (1,\dots,1)$ is a locally stable equilibrium point for the differential system ruling the dynamics of $\widetilde{\psi}_p(t)$. Indeed, $c$ is a minimum of the associated energy. However, the system might be multistable, similarly to the GL model with total coupling in the lattice \cite{rt}. By adopting a Weiss mean field approximation, one can proceed
as in \cite{rt} and infer that the equilibrium is unique for $N>>1$. Namely, defining the auxiliary variable:
\begin{equation} m_p = \sum_{j \in ne(p)} \widetilde{\psi}_j \end{equation}
the equilibrium equations read
\begin{equation}\widetilde{\psi}_p^3 + 3 \widetilde{\psi}_p = 4 m_p- 2 B. \end{equation}
Note that we are only interested in the subset $\widetilde{\psi}_p \ge s^b(B)$, which also implies $m_p \ge s^b(B)$. Note now that the equation $s^3+3 s = x$ for $x>0$ has a unique positive solution $s= k(x)$. Thus
\begin{equation} \widetilde{\psi}_p = k( 4 m_p - 2 B ). \end{equation}
Now, by the following approximation
\begin{equation} m_p \approx \frac{1}{N}\sum_{j=1}^N \widetilde{\psi}_j, \end{equation}
one gets the equation:
\begin{equation} m = k( 4 m - 2 B ), \end{equation}
which has to be solved under the constraint $m> s^b(B)$. As it is easy to verify, the above equation has only one solution, $m=s^c(B)$.
In any case, for $B\ll 1$ the initial point $\psi_p(0) = 1$ should be such that $\psi_p(t)$ remains in the basin of attraction of $c(B)$, so that for large times $\psi_p(t) \rightarrow s^c(B)$, implying that
\begin{equation}
\liminf_{t\rightarrow +\infty}\psi_p(t) \ge s^c(B).
\end{equation}
From the inequality $A_p(t) \le B$, by using similar methods one may infer that for small $B$ it is
\begin{equation}
\limsup_{t\rightarrow +\infty}\psi_p(t) \le u^c(B),
\end{equation}
where $u^c(B) > 1$ is the unique positive solution (for $B<B^*$) of the equation
\begin{equation} u-u^3 = - 2 B. \end{equation}
Note that it is $u^c(B)=-s^a(B)$, due to the anti-symmetry of function $s-s^3$.
Summing up, we may say that for small $B$ (and probably for all $B \in (0,B^*)$) it is asymptotically
\begin{equation} s^c(B) < \psi_p(t) < u^c(B). \end{equation}
Finally, we numerically solved the system
\begin{equation} \frac{1}{2}\left(\widetilde{\psi}_p-\widetilde{\psi}_p^{3}+\nabla_L^2 \widetilde{\psi}_p\right) -B = 0\end{equation}
for various values of $B$ in the interval $(0.01,B^*)$ and in all cases we found only one equilibrium with components greater than $s^b(B)$: $\widetilde{\psi}= c(B) = s^c(B)(1,\dots,1) $. Similarly, when setting $A_p(t)=+B$ in eq. (\ref {GLequation}), we found only one equilibrium value: $u^c(B)(1,\dots,1)$.
\subsection{Phase Transitions}
In the curve $M$ vs. $\tau$ a phase transition is observed (see fig. \ref{fig_GLtau}) from large to small values of the order parameter $M$ (a so-called 'order' to 'disorder' phase transition). In the absence of spatial autocorrelation, for large $\tau$ it is $M \approx 0$, whereas if one increases $\lambda$ one observes that the lower value of $M$ increases. Moreover, the transition point decreases with increasing $\lambda$.
\begin{figure}
\begin{center}
\subfigure[]
{
\label{A}
\includegraphics[width=0.48\textwidth]{Fig2a.eps}
}
\subfigure[]
{
\label{B}
\includegraphics[width=0.48\textwidth]{Fig2b.eps}
}
\end{center}
\caption{Effects of autocorrelation parameter $\tau$ on GL model perturbed by additive spatiotemporal sine-Wiener noise. The initial condition is $\psi(x,0)=1$. Panel \subref{A}: global magnetization $M$. Panel \subref{B}: relative fluctuation $\sigma_M$. Other parameters: $B=2.4$ and $\sqrt{2D}=1$.}
\label{fig_GLtau}
\end{figure}
Figure \ref{fig_GLtauB} shows the influence of the noise amplitude $B$ on the curve $M$ vs. $\tau$. We observe that for small $B$, in line with our analytical calculations, no phase transition occurs. For larger $B$, a phase transition is observed, and the transition point decreases with increasing noise amplitude.
\begin{figure}
\begin{center}
\includegraphics[width=0.5\textwidth]{Fig3.eps}
\end{center}
\caption{Effect of the noise amplitude $B$ on the curve $M$ vs. $\tau$ for GL model perturbed by additive spatiotemporal sine-Wiener noise. Here the initial condition is $\psi(x,0)=1$. Other parameters: $\lambda=1$ and $\sqrt{2D}=1$. }
\label{fig_GLtauB}
\end{figure}
Note that, based on the analytical study of the previous subsection, it is excluded that for small values of $B$ a phase transition could be observed for values of $\tau$ that are larger than the ones considered in figure \ref{fig_GLtauB}, even if we change $\lambda$ or $D$.
Finally, it is interesting to observe that since for large $\tau$ the span $A$ slowly tends to zero, it follows that $M$ will smoothly approach the value $M=1$.
Note that an increase of $\lambda$ also causes a decrease of $A$ and in turn a smooth increase of $M$, which can be observed in figure \ref{fig_GLlambda}.(a), where we plot $M$ versus $\lambda$ for $\tau = 2$.
In figure \ref{fig_surf}
we show, for three values of $\lambda$, the corresponding heat-map plots of both the GL lattice field $\psi$ and of the SW noise $\zeta$. Note that, as one may read in fig.~\ref{fig_GLlambda}, although the corresponding values of $M$ are not large and one would be tempted to say that the field is 'disordered', the heat-maps instead show large spatially autocorrelated regions, whose size increases with $\lambda$. For example, the 'low' value $M=0.23$ corresponds to the right-upper panel of figure~\ref{fig_surf}.
\begin{figure}
\begin{center}
\subfigure[]
{
\label{A}
\includegraphics[width=0.48\textwidth]{Fig4a.eps}
}
\subfigure[]
{
\label{B}
\includegraphics[width=0.48\textwidth]{Fig4b.eps}
}
\end{center}
\caption{Effects of spatial autocorrelation coefficient $\lambda$ on GL model perturbed by additive sine-Wiener spatiotemporal noise. Initial condition: $\psi(x,0)=1$. Panel \subref{A}: global magnetization $M$. Panel \subref{B}: relative fluctuation $\sigma_M$. Other parameters are $B=2.6$, $\tau=2$ and $\sqrt{2D}=1$.}
\label{fig_GLlambda}
\end{figure}
\begin{figure}
\begin{center}
\includegraphics[width=11.5cm,angle=-90]{fig5.eps}
\end{center}
\caption{Effects of spatial correlation strength $\lambda$ on the field $\psi$ of a $40\times40$ lattice GL system perturbed by additive sine-Wiener noise. The increase of spatial autocorrelation of the field is driven by that of the noise. All the panels refer to $t=250$. Upper panel: GL field; lower panel: sine-Wiener noise. Other parameters: $B=2.6$, $\sqrt{2D}=0.75$ and $\tau=2$.}
\label{fig_surf}
\end{figure}
Figure \ref{fig_osc} shows the time series for the signed magnetization defined as
\begin{equation}
M_s(t)\equiv\frac{\sum_{p}\psi_p(t)}{N^{2}}.
\end{equation}
This figure supports the idea that the above-mentioned clusters are in general non-symmetric (i.e. the total positive and negative magnetization is different) and unstable, resulting in an oscillation between positive and negative magnetization, whose amplitude is increasing with $\lambda$.
\begin{figure}
\begin{center}
\subfigure[]
{
\label{A}
\includegraphics[width=0.48\textwidth]{Fig6a.eps}
}
\subfigure[]
{
\label{B}
\includegraphics[width=0.48\textwidth]{Fig6b.eps}
}
\end{center}
\caption{Effects of spatial and temporal noise correlation parameters in the solutions of GL system, measured by the signed magnetization $M_s$. Panel \subref{A}: the spatial correlation increases the amplitude of the oscillations between positive and negative signed magnetization (here $\tau = 2.5$). Panel \subref{B}: the parameter $\tau$ decreases the number of switches between negative and positive values $M_s$ (here $\lambda =9$). Other parameters $B=2.4$ and $\sqrt{2D}=1$.}
\label{fig_osc}
\end{figure}
By varying the white noise strength $\sqrt{2D}$ a re-entrant transition is observed, see fig. \ref{fig_GLeps}. Note that $\lambda$ increases the lower value of $M$ and shifts the first transition point, whereas its effect on the second transition point - where it exists - is modest.
\begin{figure}
\begin{center}
\subfigure[]
{
\label{A}
\includegraphics[width=0.48\textwidth]{fig7a.eps}
}
\subfigure[]
{
\label{B}
\includegraphics[width=0.48\textwidth]{fig7b.eps}
}
\end{center}
\caption{Re-entrant phase transition in GL model perturbed by additive spatiotemporal sine-Wiener noise for varying white noise strength $\sqrt{2D}$. Initial condition is $\psi(x,0)=1$. Panel \subref{A}: global magnetization $M$. Panel \subref{B}: relative fluctuation $\sigma_M$. Other parameters $B=2.6$ and $\tau=2$.
}
\label{fig_GLeps}
\end{figure}
Figure \ref{fig_GLdistri} illustrates the impact of the noise bound $B$ (left panel) and of the white noise strength $D$ (right panel) on the stationary distribution of the field $\psi$ of the GL model. Varying $B$ one may observe transitions from bimodality located close to $\psi=1$ to bimodality with modes roughly at $\psi =\pm 1.25$. On the contrary, varying $D$ a re-entrant transition from unimodality to bimodality and back to unimodality is observed, which is in line with the re-entrant phase transition shown in fig. \ref{fig_GLeps}.
\begin{figure}
\begin{center}
\includegraphics[width=0.48\textwidth]{Fig8a.eps}
\includegraphics[width=0.48\textwidth]{Fig8b.eps}
\end{center}
\caption{Stationary distribution of the field for the GL model perturbed by additive spatiotemporal sine-Wiener noise, in response to changes in noise parameters $B$ (left panel), and $D$ (right panel). Other parameters are, respectively, ($\tau=2$, $\lambda=1$, $\sqrt{2D}=0.75$) and ($\tau=2$, $\lambda=1$, $B=2.6$).
}
\label{fig_GLdistri}
\end{figure}
\subsection{Transitory analysis}
In order to study the efficacy of the system in recovering the state with 'large' $M$, we re-consider the same transitions in $\tau$ and $\sqrt{2D}$ formerly analyzed with different initial conditions. Namely, here we assume that the $\psi_p(0)$ are normally distributed with zero mean and standard deviation equal to $0.2$: $\psi_p(0) \sim N(0,0.2)$. The simulations and the averages were done, respectively, in the time intervals $[0,750]$ and $[625,750]$. Figure \ref{fig_GLtauIni0} shows, in the region with large $M$, the onset of very long transient states. On the contrary, the disordered phase is reached after a short transient. Similar results are obtained when varying $D$ (not shown).
\begin{figure}
\begin{center}
\includegraphics[width=8cm]{Fig9a.eps}\includegraphics[width=8cm]{Fig9b.eps}
\end{center}
\caption{Effects of temporal autocorrelation $\tau$ on GL model perturbed by additive spatiotemporal sine-Wiener noise, with disordered initial conditions normally distributed as follows: $\psi_p(0) \sim N(0,0.2)$. Other parameters (as in figure 2): $B=2.4$ and $\sqrt{2D}=1$.
Dots series represent (with various symbols) different realizations of the system at simulation time $750$, while the continuous line is the average value, computed over all the realizations and over the last 125 time units.}
\label{fig_GLtauIni0}
\end{figure}
\subsection{Comparison with the GSR noise}
In this section we shall compare the statistical outcomes of the solutions of the GL model under 'equivalent' GSR and sine-Wiener noises, and we shall describe the criteria to establish the related 'equivalence'.
Let us initially consider fig.~2, reporting phase transitions in $\tau$ caused by sine-Wiener noise. We recall that in fig.~2 each curve is identified by a specific value of $\lambda$ and all curves share two parameters: $B= 2.4$ and $\sqrt{2 D} = 1$. How to choose an 'equivalent' GSR noise? The first na\"{i}ve choice would be considering the GSR noise employed as argument of the sine function to generate the sine-Wiener noise. However, this choice would be 'unfair' for the GSR noise, since its span --- roughly quantifiable as twice its standard deviation $\sigma_{GSR}$ --- would be too small. Instead, a more 'fair' way is to adopt GSR noises such that their 'span' is equal to the amplitude of the bounded noise: $2 \sigma_{GSR}=B_{SW}$. As a consequence, for the generic $i$-th point of the $j$-th curve of fig.~2, identified by the pair $(\tau_i,\lambda_j)$, one has to generate a GSR noise by setting:
\begin{equation}\label{compD} D_{i,j}=\frac{B^2}{4}(1+2 \lambda_j^2)\tau_i. \end{equation}
In other words, at each point we must modify the strength of the white noise that - via the eq. (\ref{XXX}) - generates the sine-Wiener noise. In figure \ref{F2replypippoz} the result of the above-outlined comparison is shown. Also there a phase transition is observed, as the one shown in figure 2, but the transition point is at smaller values of $\tau$.
\begin{figure}
\begin{center}
\includegraphics[width=8cm]{Fig10.eps}
\end{center}
\caption{Effects on GL model of GSR noise obtained by means of formula
(\ref{compD}): curve $M$ vs $\tau$ for GL model perturbed by an additive GSR noise. Other parameters: $B=2.4$. To be compared with fig.~2.}
\label{F2replypippoz}
\end{figure}
Similarly, let us consider the phase transitions illustrated by figure \ref{fig_GLeps}. The $k$-th point of the $j$-th curve is defined by the pair $(D_k,\lambda_j)$. Thus, from the relationship $B_{SW}=2 \sigma_{GSR}$ the 'equivalent' GSR noise has to be chosen in a way such that:
\begin{equation} \label{comptau}\tau_{k,j}=\frac{D_k}{(1+2 \lambda_j^2)}\frac{4}{B^2}. \end{equation}
The comparison is shown in figure \ref{paperino}: no re-entrant transition is observed.
\begin{figure}
\begin{center}
\includegraphics[width=8cm]{Fig11.eps}
\end{center}
\caption{
Comparison via eq. (\ref{comptau}) of the effects of GSR vs. sine-Wiener noises on GL model: $M$ vs $\sqrt{2 D}$. In both curves $\lambda = 1$ and $B=2.6$, and in GSR noise $\tau=2$ . In the GSR noise perturbed case no re-entrant phase transitions are present.}
\label{paperino}
\end{figure}
However, a third kind of comparison can be performed by employing a somewhat opposite starting point, which is the response of the GL system to the GSR noise. This type of comparison prescribes that: i) one simulates a GL system excited by a given GSR noise with known parameters, say $(D_x, \tau_x, \lambda_x)$; ii) then one simulates the GL system perturbed by a sine-Wiener noise with the following amplitude
\begin{equation}\label{neweq}
B_{SW} = 2 \sqrt{ \frac{D_x}{(1+2 \lambda_x^2)\tau_x} }. \end{equation}
Figure \ref{pluto}.(a) shows the curve $M$ vs. $\tau$ for a GL system perturbed by a GSR noise with $\lambda \in \{ 0.1, 1.5,6\}$ and $\sqrt{2 D}=1$. Figure \ref{pluto}.(b) shows the corresponding diagram for the sine-Wiener noise, through the application of formula (\ref{neweq}). As one may see, for $\lambda =0.1$ the GSR noise induces a transition from small to large values of $M$, which is not observed in case of sine-Wiener noise. For larger $\lambda$ neither noise induces transitions in $M$.
\begin{figure}
\begin{center}
\subfigure[]
{
\label{A}
\includegraphics[width=0.48\textwidth]{Fig12a.eps}
}
\subfigure[]
{
\label{B}
\includegraphics[width=0.48\textwidth]{Fig12b.eps}
}
\end{center}
\caption{ Comparison via eq. (\ref{neweq}) of the effects of GSR (panel \subref{A}) vs. sine-Wiener (panel \subref{B}) noises on GL model: $M$ vs $\tau$. For $\lambda =1.5$ and $\lambda =6$ no phase transitions are present in both cases. For $\lambda =0.1$ a transition from low to large values of $M$ can be observed in case of GSR noise, whereas for sine-Wiener noise $M$ remains close to $1$. For GSR noise $\sqrt{2 D}=1$.
}
\label{pluto}
\end{figure}
Figure \ref{gigio} shows the curve $M$ vs. $\sqrt{2 D}$ corresponding to a GL system perturbed by a GSR noise with $\lambda = 1$ and $\tau=2$, and the corresponding SW noise (obtained via eq. (\ref{neweq}) ) for $\lambda = 1$. In case of GSR noise a phase transition is observed, whereas for the SW noise no transition is observed.
\begin{figure}[t]
\begin{center}
\includegraphics[width=0.48\textwidth]{Fig13.eps}
\end{center}
\caption{Comparison via eq. (\ref{neweq}) of the effects of GSR vs. sine-Wiener noises on GL model: $M$ vs $\sqrt{2 D}$. In both curves $\lambda = 1$, and in GSR noise $\tau=2$.
}
\label{gigio}
\end{figure}
\FloatBarrier
\section{Concluding remarks}
Here we defined a novel spatiotemporal bounded noise, which is derived from the sine-Wiener temporal bounded noise, and from the spatiotemporal unbounded GSR noise.
By numerical simulations, and by the properties of the variance of the GSR noise, we showed that the SW noise may undergo a stochastic bifurcation assuming as bifurcation parameter $\lambda$, or $D$. Moreover such bifurcation is also observed in $\tau$, but for large values of this parameter.
In \cite{deFradOnpre} we showed that the Cai-Lin noise also undergoes stochastic bifurcations, which however, are of different nature. Indeed, in that case the bifurcation is from bimodality to unimodality, whereas here the transition is from bimodality to trimodality. Moreover, both for Cai-Lin and for Tsallis-Borland noises no bifurcation is observed with increasing $\tau$, whereas in case of sine-Wiener noise the temporal autocorrelation parameter $\tau$ can induce the bimodality/trimodality transition.
Then we studied the role of the defined noise in the additive perturbation of the GL model. We obtained some effects of interest, among which: i) re-entrant phase transitions in $\sqrt{2D}$; ii) transitions from uni-modality to bi-modality in the distribution of the GL field in correspondence to ordered and disordered phases; iii) disordered phase characterized by clusters of the GL field whose size depends on $\lambda$, and whose permanence depends on $\tau$; iv) different temporal length of transient depending on the assumed initial conditions.
We compared the effect of bounded perturbations on GL systems with those relative to unbounded GSR perturbation with the same fluctuation statistics and spatiotemporal features. This investigation allowed us to point out, with both numerical simulations and analytical considerations, that the boundedness of noise is crucial for the stability of the 'ordered' state.
The phase transition in $\tau$ observed here shares some features with those induced by Cai-Lin spatiotemporal noise \cite{deFradOnpre}, but the transition point occurs in different ranges.
It follows that the observed phenomenologies strongly depend on the specific model of noise that has been adopted. Thus, in the absence of experimental data on the distribution of the stochastic fluctuations for the problem under study, it could be necessary to compare multiple kinds of possible stochastic perturbation models. This is in line with similar observations concerning bounded noise-induced-transitions in zero-dimensional systems \cite{pre}.
Finally, here we faced a systematic comparison between the defined bounded noise and the GSR unbounded noise.
As far as the future investigations are concerned, the priority will be given to a real understanding of the physics underlying the observed phase transitions.
Moreover, we want to point out the need to perform analytical studies in order to exactly characterize the origin of the bifurcations for the sine-Wiener and other bounded noises. This might be important for the above-mentioned physical investigation of the bounded noise-induced phase transitions.
Finally, following the second 'recipe' one might define an entire wide family of spatiotemporal noises derived from the GSR noise, as follows:
$$ \zeta(x,t)= f\left( 2 \ \pi \ \xi(x,t) \right),$$
where $\xi(x,t)$ is a GSR noise, and $f(u)$ is a bounded continuous function. In a further work we shall compare the results here illustrated with those obtained by varying the specific function $f(.)$.
\section*{Acknowledgments}
This research was performed under the partial support of the Integrated EU project \textit{P-medicine - From data sharing and integration via VPH models to personalized medicine} (Project No. 270089),
which is partially funded by the European Commission under
the Seventh Framework program.
\bibliographystyle{spphys}
|
{
"timestamp": "2012-11-26T02:03:57",
"yymm": "1206",
"arxiv_id": "1206.6020",
"language": "en",
"url": "https://arxiv.org/abs/1206.6020"
}
|
\section{Introduction}
\label{intro}
\emph{Anonymous database search} is the discipline
dedicated to the study of the anonymity of query searches in
databases.
Namely, it is dedicated to
the study of protocols that allow a user to retrieve information
from a database server without revealing for the server who he is.
Anonymous database search has also been called
\emph{user-private information retrieval} (UPIR) \cite{DoBr,DoBrWuMa,StBr_Optimal}.
Another example of anonymous database search, similar to the previously mentioned UPIR protocols, is the protocol Crowds~\cite{crowds}.
Unlike private information retrieval (PIR) protocols, UPIR protocols
do not hide the query for the database.
Usually, when people access a database server,
the responsibility of guaranteeing their privacy is
assigned to the database owner or to a trusted third party.
In UPIR protocols, the privacy
of the user is put in the hands of the user.
The protocols for UPIR presented in~\cite{DoBr,DoBrWuMa,StBr_Optimal,SwansonStinson}
are defined on a P2P community, and are called P2P UPIR.
In a P2P UPIR protocol, a community of P2P users agree to
collaborate in order to search the database anonymously.
The users send queries to the database on behalf of other
users, without revealing to the database the identity of
the owner of the query. In this way, the query profiles
of the users are diffused among the rest of the users in the community.
Moreover, the protocol distributes the queries uniformly
to avoid tracing of the queries.
The queries are written and read by users on some memory sectors that are called communication spaces.
Each user has access only to some of these communication spaces.
As a means to hinder unauthorized entities from obtaining
information about queries,
the information on the communication spaces is encrypted.
If the encryption of the information on the communication spaces
uses the same key over the entire network,
then there is a high risk that the key is compromised.
But if the number of keys is too large, then the users can have
storage problems.
Given a set of requirements on the protocol, the search for the
optimal distribution of communication spaces and keys
can be defined as a combinatorial problem with constraints.
In the articles \cite{DoBr,DoBrWuMa,StBr_Optimal,StBr_PAIS2011}, combinatorial configurations,
also known as regular and uniform partial linear spaces,
were used to manage the distribution of communications spaces and keys for P2P UPIR.
These combinatorial structures have been used for
distributing keys also in other contexts, as in \cite{Stinson1}.
In \cite{SwansonStinson} the P2P UPIR protocols were modified and
extended to more general families of block designs.
Curious users should be prevented from obtaining information about other users.
It can be proved that this prevention is simplified if we avoid, for instance,
designs in which a pair of users share
more than one communication space, and designs in which a user can
access all communication spaces.
This motivates the use of combinatorial configurations.
In this article, we apply $k$-anonymity arguments to the construction of anonymous database search algorithms, and present the combinatorial configurations with $k$-anonymous (open) neighborhoods and with $k$-anonymous closed neighborhoods.
We study transversal designs and linear spaces and we show that they are optimal configurations.
We present two versions of
P2P UPIR protocols that are based
on the protocols presented in~\cite{DoBr,DoBrWuMa,StBr_PAIS2011,SwansonStinson}.
In one of the protocols (P2P UPIR 1), the user cannot forward directly his own
queries to the database, while in the other he is allowed to do so (P2P UPIR 2).
The P2P UPIR 2 protocol is a modification of P2P UPIR 1,
designed in order to avoid so-called neighborhood attacks in some combinatorial configurations.
These attacks were first described in \cite{StBr_PAIS2011}, and are based on query repetition from the P2P UPIR users,
in combination with unique neighborhoods in the combinatorial configuration.
As observed in \cite{SwansonStinson}, a neighborhood attack can be modeled as the intersection of neighborhoods,
that may return a single identified point in the case of a unique neighborhood.
In this article we give several examples of combinatorial configurations with unique neighborhoods.
The results presented in this article show that it is not necessary for users to self-submit their queries in order to avoid neighborhood attacks.
We present a family of combinatorial configurations with anonymous neighborhoods.
We use the concept of $k$-anonymity to measure the degree of this anonymity,
and say that a point has a $k$-anonymous neighborhood if its neighborhood is also the neighborhood of at least $k-1$ other points.
Then we study and characterize the combinatorial configurations with $k$-anonymous neighborhoods.
In particular, we justify why the transversal designs can be regarded as optimal among the configurations with $k$-anonymous neighborhoods for P2P UPIR 1.
We also characterize the anonymity that is provided by the P2P UPIR 1 protocol, when combinatorial configurations with $k$-anonymous neighborhoods are used.
As can be deduced from \cite{StBr_Optimal}, the linear spaces are optimal configurations with respect to maximal diffusion of the query profiles.
In \cite{SwansonStinson}, the linear spaces were presented as the only combinatorial configurations that can provide P2P UPIR with so-called \emph{perfect anonymity}, and this was extended to designs in general, among which the covering designs were distinguished for having the same property.
In this article we show that the linear spaces have $k$-anonymous closed neighborhoods,
and that they maximize the parameter $k$.
This justifies once again why the linear spaces are optimal for P2P UPIR.
We also construct a new class of combinatorial configurations that also have $k$-anonymous closed neighborhoods, but in which $k$ is smaller compared to the linear spaces.
This article is structured as follows.
Section~\ref{sec:prel} contains the preliminaries,
in Section~\ref{sec:P2PUPIR} we define the P2P UPIR protocols,
Section~\ref{sec:notations} introduces notation and formalizes P2P UPIR in terms of data privacy.
In Section~\ref{sec:compprot} we describe attacks on P2P UPIR and identify ways to avoid some of them.
Section~\ref{sec:privP2PUPIR} discusses the nature of the privacy that can be provided by the P2P UPIR protocols and under what conditions.
In Section~\ref{sec:unique_neighborhoods} we give examples of combinatorial configurations with unique neighborhoods.
Sections~\ref{sec:combconfnanneighbors} and \ref{sec:nanonymousclosedneighborhoods} classify and give examples of combinatorial configurations with $k$-anonymous neighborhoods and $k$-anonymous closed neighborhoods, respectively.
The article ends with conclusions.
\section{Preliminaries}
\label{sec:prel}
This section contains known results and concepts that will be used in the rest of the article.
\subsection{Privacy}
Most of the definitions and the notation presented in the next paragraph are taken from \cite{Privacy}.
An \emph{adversary}, or an \emph{attacker}, is an entity that aims for the destruction of the privacy protection.
A subject $s$ is \emph{anonymous} if the adversary cannot identify him within a set of subjects.
We call this set of subjects the \emph{anonymity set} of $s$.
Two or more so-called items of interest are \emph{unlinkable} if within the system (comprising these and possibly other items), the adversary cannot sufficiently distinguish whether these items of interest are related or not.
In this article, an item of interest can be a query, a sequence of queries, the owner of a query, the owner of a sequence of queries, or the identity of this owner.
We can express anonymity in terms of unlinkability; anonymity is provided if it is not possible to link a subject to the identity of this subject.
\emph{Confidentiality} is the quality of being prevented from the disclosure of information to unauthorized individuals or systems.
Disclosure risk control for statistical databases is a research area concerned with the protection of the privacy of individuals in published statistical databases~\cite{Josep2,Willenborg}.
The naive solution is to protect the database by simply removing the identifiers (e.g. name, ID, social security number) from the tables.
It is well-known that this solution is far from satisfactory.
In many cases it is rather easy to recover the identifier of the anonymized record (see for example~\cite{Sweeney}).
Other more sophisticated solutions for protecting databases have been proposed.
Many of these solutions are methods for obtaining so-called $k$-anonymity, which we will define below.
A \emph{database} is a collection of records of data.
We may assume that all records correspond to distinct individuals or objects.
Every record has a unique identifier and is divided into attributes.
The attributes can be very specific, as the attributes ``height'' or ``gender'', or more general, as the attributes ``text'' or ``sequence of binary numbers''.
Suppose that the database can be represented as a single table.
Let the records be the rows of the table and let the attributes be the columns.
The intersection of a row and an attribute is a cell in the table, and we call the data in the cells the entries of the database.
Also other data structures, like for example graphs, or in general, incidence structures,
are representable in table form.
Let $T$ be a table with the set of attributes $A$.
Let $B\subseteq A$ be a subset of these attributes.
We denote the projection of the table on the attributes $B$ by $T[B]$.
We suppose that every record contains information about a unique individual.
An \emph{identifier} $I$ in a database is an attribute such that it uniquely identifies the individuals behind the records.
In particular, any entry in $T[I]$ is unique.
A \emph{quasi-identifier} $QI$ in the database is a collection of attributes $\{A_1,\dots,A_n\}$,
such that they in combination can uniquely, or almost uniquely, identify a record \cite{Dalenius}.
That is, the structure of the table allows for the possibility that an entry in $T[QI]$ is unique, or that there are only a small number of equal entries.
In the former case the entry in $T[QI]$ uniquely identifies the individual behind the record and in the latter, the few other individuals with the same entries in $T[QI]$ may form a collusion and use secret information about themselves in order to make this identification possible.
\begin{example}
If a table contains information on students in a school class, the attributes birth data and gender could be sufficient to determine to which individual a record of the table corresponds, although it is possible that not all records will be uniquely identified in this way.
Hence for this table, birth date and gender are an example of a quasi-identifier.
\end{example}
The following definition of $k$-anonymity appeared for the first time in~\cite{ref:Samarati.Sweeney.1998} (see also~\cite{Sweeney}).
\begin{definition}\label{1:def:nan}
A table $T$, that represents a database and has associated quasi-identifier $QI$, is $k$-\emph{anonymous} if every sequence in $T[QI]$ appears with at least $k$ occurrences in $T[QI]$.
\end{definition}
\subsection{Incidence Structures}
\label{sec:incidencegeometry}
An \emph{incidence structure} $(P,L,I)$
sometimes also called a \emph{block design},
consists of a \emph{point set} $P$, a family of subsets of points $L$ called \emph{block set},
and an \emph{incidence relation} $I$ on $P$.
The elements of $P$ and $L$ are called \emph{points} and \emph{blocks}, respectively, and two points are related by the incidence relation if and only if there is a block containing both. In this article we assume that the same block only can appear once in $L$.
We will also assume that the incidence structures are connected,
so that for every two different points $p,q\in P$,
there is a chain of incidences starting with $p$ and ending with $q$.
If all blocks have the same number of points $k$,
then we say that the incidence structure is \emph{$k$-uniform}.
If all points are in the same number of blocks $r$,
then we say that the incidence structure is \emph{$r$-regular}.
The \emph{order} of a uniform and regular incidence structure is the integer pair $(k-1,r-1)$.
A \emph{parallel class} $L'$ is a subset of $L$ such that for all $p\in P$ there is a unique block $l\in L'$ such that $p\in l$. Every parallel class $L'=\{l_1,\ldots,l_m\}$ is a partition of $P$, because
$P=\cup_{i=1}^{m} l_i$ and $l_i\cap l_j=\emptyset$ for $1\leq i< j\leq m$. An incidence structure $(P,L,I)$ is \emph{resolvable} if there exists a partition of the set of blocks $L=\{L_1,\dots,L_s\}$,
such that $L_i$ is a parallel class of blocks for $1\leq i\leq s$.
The \emph{line} spanned by two points is the intersection of all the blocks containing these points.
When every pair of points is contained in at most one block, then the blocks are the lines.
In an incidence structure in which the blocks are lines, we say that two points are \emph{collinear} if there is a line through the two points.
Observe that a point is always collinear with itself.
We define the \emph{closed neighborhood} $CN(p)$ of $p$ as the set of points that are collinear with the point $p$.
The \emph{neighborhood} or \emph{set of neighbors} $N(p)$ of a point $p$ is the set of points in $P$ that are collinear with $p$ but different from $p$.
Observe that $CN(p)=N(p)\cup\{p\}$.
\subsubsection{Linear Spaces, Partial Linear Spaces, and Combinatorial Configurations}
A \emph{linear space} is an incidence structure in which every two points are in exactly one block,
so in a linear space we may say that the blocks are lines.
It is not required for the lines to have the same number of points, but the minimum number of points on a line is two.
A \emph{finite affine plane} is an incidence structure in which
\begin{itemize}
\item every two points span exactly one line,
\item for every point $p$ and line $l$ not incident with $p$, there is exactly one other line $m\in L$ such that $p$ is incident with $m$ and $l\cap m=\emptyset$,
\item there is a triangle, a set of three points such that they pairwise span different lines.
\end{itemize}
A finite affine plane is therefore a linear space.
In a finite affine plane there is always a natural number $n$ such that there are $n$ points on every line, $n+1$ lines through every point, $n^2$ points and $n^2+n$ lines.
In particular, a finite affine plane is uniform and regular.
The order of a finite affine plane is $(n-1,n)$, but the usual notation is that the order is $n$.
The second condition in the definition of affine plane implies that the set of lines is partitioned into classes of parallel lines, so that an affine plane is resolvable.
The affine plane over a finite field of order $q$ is always a finite affine plane of order $q$ which we denote by $\mathbb{A}(\mathbb{F}_q)$.
From this it is deduced that there exists a finite affine plane of order $q$ for every prime power $q$.
When $q$ is not prime, then there are other finite affine planes than $\mathbb{A}(\mathbb{F}_q)$,
but it is not known if there are finite affine planes of order $n$ when $n$ is not a power of a prime.
It is conjectured that there exists a finite affine plane of order $n$ if and only if $n$ is a power of a prime.
Other examples of uniform and regular finite linear spaces are the finite projective planes, the unitals, and the Denniston designs.
A \emph{partial linear space} is an incidence structure in which every two points can be on at most one line.
Also it is required that the minimum number of points on a line is two.
All linear spaces are partial linear spaces.
The number of points is usually denoted by $v$, and the number of lines by $b$.
In this article we will concentrate on $r$-regular and $k$-uniform partial linear spaces, also known as \emph{combinatorial $(v,b,r,k)$-configurations}, or shorter, $(r,k)$-configurations.
For general references on combinatorial configurations, see \cite{Gropp,Grunbaum}.
\subsubsection{$t$-Designs and Transversal Designs}
\label{sec:transversaldesigns}
Another interesting type of incidence structure is the $t$-design. A $t$-\emph{design} with parameters $(v,k,\lambda)$ has $v$ points, $k$ points in every block, and every $t$-element subset of points appears in exactly $\lambda$ blocks.
A $2$-design with $\lambda=1$ is a combinatorial configuration, or more precisely, a regular linear space.
The $2$-designs are also called \emph{balanced incomplete block designs} with parameters $(v,k,\lambda)$, or shorter, \emph{$(v,k,\lambda)$-BIBD}.
In this article we will also treat a third type of incidence structure, the transversal designs.
A \emph{transversal design} $TD_{\lambda}(k,n)$ is a $k$-uniform incidence structure $(P,L,I)$ with $|P|=kn$ that admits a partition of $P$ whose parts, called \emph{groups}, have cardinality $n$, such that the following properties hold:
\begin{enumerate}
\item any group and any block contain exactly one common point, and
\item every pair of points from distinct groups is contained in exactly $\lambda$ blocks.
\end{enumerate}
In a transversal design the set of groups forms a partition of $P$ but it is not a parallel class since the groups are not blocks.
On the other hand, if the block set $L$ can be partitioned into parallel classes, then we get a resolvable transversal design.
A transversal design $TD_{\lambda}(k,n)$ is a combinatorial $(kn,n^2,n,k)$-configuration if and only if $\lambda=1$. In this article we are interested in the transversal designs of this kind.
For simplicity of notation we will denote a transversal design with $\lambda=1$ by $TD(k,n)$.
It is well-known that affine planes can be used to construct transversal designs as described in the following lemma.
\begin{lemma}\label{1:lem:transaffine}
Whenever there exists a finite affine plane of order $n$, then for every $2\leq k\leq n$ there exists a transversal design $TD(k,n)$.
\end{lemma}
\begin{proof}
As point set $P$ of $TD(k,n)$, take the points on the $k$ lines from one of the parallel classes of an affine plane of order $n$.
As the groups of $TD(k,n)$, take the lines from the same parallel class.
As lines of $TD(k,n)$, take the lines in the rest of the parallel classes of the affine plane, restricted to the points in $P$.
\end{proof}
More generally, it is well-known that the existence of transversal designs is related to the existence of a set of mutually orthogonal latin squares. For more information about these structures, see for example \cite{MOLS,PaBoSh}.
\section{P2P UPIR: peer-to-peer protocols for anonymous database search} \label{sec:P2PUPIR}
In this section we describe the peer-to-peer protocols
for user-private information retrieval (P2P UPIR), first presented in~\cite{DoBr,DoBrWuMa}.
These protocols use communication spaces, which are memory sectors in which
a user who has access to the corresponding cryptographic key can write and read queries and the answers to these queries.
The distribution of the cryptographic keys is determined by a combinatorial configuration.
The clients of the protocol are mapped to the points of a combinatorial configuration, and
the keys, or the communication spaces, are mapped to the lines.
The result is that a client, represented by the point $p$, has the cryptographic keys giving access to the communication spaces that are represented by the lines through $p$.
\subsection{The P2P UPIR INIT protocol}
The P2P UPIR protocols described herein are called by a protocol that is implemented by all the community of users together.
We call this protocol P2P UPIR INIT.
This protocol takes as parameter the combinatorial configuration for the distribution of communication spaces.
By abuse of notation, we will not distinguish the points and the lines of the configuration from the users and the communication spaces they represent.
A communication space is a queue of messages, together with a cryptographic key from a symmetric cipher, used to encrypt the messages.
The precondition is here that a community $P$ of $n$ users wants to implement a P2P UPIR protocol.
The postcondition is that some user has dropped out of the protocol.
\begin{protocol}[P2P UPIR INIT]~\label{1:def:p2pupirinit}
\begin{enumerate}
\item The users in $P$ are mapped to the points of the combinatorial configuration.
\item The users repeat execution of the P2P UPIR protocol with frequency $f$ (which is not required to be constant, nor the same for all users).
\end{enumerate}
\end{protocol}
\begin{remark}\label{rmk:freqcheck}
The protocol described in \cite{DoBr,DoBrWuMa} was different.
For instance, the user repeated the P2P UPIR protocol only when they had a query to post.
However, in order to limit the waiting time before a user can post his query, and the response time for the answer, the period of protocol repetition must be bounded. More exactly, $f$ should always be greater than or equal to the highest query submission frequency among the users.
\end{remark}
\begin{remark}
It is not necessary to end the P2P UPIR INIT protocol only because a user $p$ is temporarily away.
The owner of a query that should have been posted by $p$, can execute P2P UPIR again in order to get his query posted to the server.
However, we think that only modest and controlled absences should be allowed for, since a prolonged absence causes the deterioration of the provided anonymity.
\end{remark}
\subsection{The P2P UPIR 1 protocol}
First we present a P2P UPIR protocol which is similar to the protocol described in \cite{DoBr,DoBrWuMa}, but modified following the ideas from \cite{SwansonStinson}.
We will call this protocol P2P UPIR 1.
The individual $p$ is a member of a community of users implementing the P2P UPIR INIT protocol, and $p$'s execution of P2P UPIR 1 is done within P2P UPIR INIT.
The user $p$ may, or may not, have a query $Q$ which he wants to post to the community.
\begin{protocol}[P2P UPIR 1]~
\begin{enumerate}
\item The user (point) $p$ selects uniformly at random a communication space (line) $l$ passing through $p$;
\item $p$ decrypts the content on $l$ using the corresponding cryptographic key.
The outcome is a queue of messages $M=(M_i)$.
For every message $M_i$ in the queue:
\begin{itemize}
\item If $M_i$ is a \textbf{query addressed to $p$}, then $p$ removes $M_i$ from the queue, forwards $M_i$ to the server, receives the answer $A$, encrypts $A$ and writes $A$ to the end of the queue $M$;
\item Else if $M_i$ is an \textbf{answer to a query belonging to $p$}, then $p$ reads $M_i$ and removes $M_i$ from the queue $M$;
\item Else, $p$ leaves $M_i$ on the queue without action;
\end{itemize}
\item If $p$ has a query $Q$, then
\begin{enumerate}
\item $p$ selects uniformly at random a point $p'\neq p$ on $l$;
\item $p$ addresses $Q$ to $p'$ and writes $Q$ to the end of the queue $M$.
\end{enumerate}
\end{enumerate}
\end{protocol}
\subsection{The P2P UPIR 2 protocol}
We also present a variation of the former protocol which we will call P2P UPIR 2. This protocol was first described in~\cite{StBr_PAIS2011}.
The P2P UPIR 2 protocol differs from the P2P UPIR 1 protocol only in how the users forward their own queries.
We say that a user who forwards his own queries with probability $x$, has self-submission $x$.
The P2P UPIR 2 protocol with self-submission $x$ is obtained from the P2P UPIR 1 protocol by replacing step 3 (a) by: \bigskip\\
\textit{3 (a') $p$ selects a point $p'$ on $l$; with probability $x$ he selects $p'=p$, else he selects uniformly at random $p'\neq p$ on $l$;}
\bigskip
\begin{remark}
As will be proved in Proposition~\ref{prop:selfsubmission}, the P2P UPIR 2 protocol should be executed with self-submission $x=\frac{1}{|CN(p)|}=\frac{1}{r(k-1)+1}$.
\end{remark}
\section{Notations and formal framework for the analysis of P2P UPIR}
\label{sec:notations}
In this section we will define the formal framework in which the rest of the analysis will take place.
\subsection{Queries}
Let $P$ be a community of users implementing an instance of the P2P UPIR protocol.
For every user $p\in P$ we define the \emph{real query profile} $RP(p)$ as the temporal sequence of queries which $p$ posts to the communication spaces and
the \emph{apparent query profile} $AP(p)$ as the temporal sequence of queries which the user posts to the server.
By extension we define the real query profile $RP(U)$ and the apparent query profile $AP(U)$ of a set of users $U\subseteq P$.
A \emph{query} is a set of one or more search terms.
A \emph{repeated query} is a query which occurs more than once in the real profile of a user.
A \emph{repeated variation of a query} is a query posted by a user which is a slight modification of a previous query posted by the same user.
The latter definition is vague and ambiguous, but still useful.
We say that a profile is \emph{rare} if it contains many unique queries or unique combinations of queries and we say that it has repetition if it contains many repeated queries or repeated variations of queries.
\subsection{Disclosure control}
\label{sec:disclosurecontrolp2pupir}
In \cite{Josep1}, three types of privacy protection are distinguished, in function of the entity for whom the protection is provided: \emph{respondent privacy}, \emph{owner (holder) privacy} and \emph{user privacy}.
The aim of the P2P UPIR protocol is to provide anonymous database search, which falls under the area of anonymous communication, or, following the notation in \cite{Josep1}, user privacy.
However, in this article we choose to model it in the context of respondent privacy, as a method for disclosure control of databases.
The database protected by P2P UPIR is then the collection of queries that the users of the protocol post to the server, or in other words, the real profiles of the users.
There are two important differences between traditional disclosure control for statistical databases (respondent privacy) and disclosure control for P2P UPIR (user privacy interpreted as respondent privacy):
\begin{itemize}
\item Typically, in respondent privacy, the disclosure control is applied to a given database.
The P2P UPIR protocol is however executed in real-time as the users post queries to the server, that is, as the information is introduced into the database.
We may say that the P2P UPIR is a \emph{streaming} disclosure control method;
\item For respondent privacy, it is typically important to balance low disclosure risk with low information loss,
since it is useless to publish a database without information.
In the P2P UPIR protocol, the users anonymize the data they give to the server themselves, instead of leaving this task to the server.
For the aim of P2P UPIR, there is no need to control the utility of the query profiles collected by the server.
We will assume that the users have no interest in providing a useful statistical database.
\end{itemize}
Some users find useful some of the services provided by the server that are based on their query (or mail) profile.
Also, typically, the server provides query searches for free, in exchange for the valuable information that is collected in the query profiles.
We propose that the query profile should be maintained by the user himself, and provided to the server when so desired.
This approach would put the privacy of the user in the hand of the user, where it should be.
Consider a community of $v$ users $P$ implementing an instance of the P2P UPIR 1 protocol.
Without loss of generality, we can limit the analysis to some time interval $t$.
Then we note by $RP_t(P)$ and $AP_t(P)$ the profiles $RP(P)$ and $AP(P)$ restricted to $t$.
In this context, the P2P UPIR protocol is a transformation of the database which we will denote by
\[\begin{array}{rccl}
\rho:&D&\rightarrow &D\\
&RP_t(P)&\mapsto& AP_t(P),
\end{array}\]
where $D$ is the space of all possible query databases.
The database $RP_t(P)$ is a table
where the identifier is the user ID,
and there is an attribute Q($t_i$) for every approximate time interval $t_i$,
containing a single query posted to the server by the user approximately at time $t_i$, or a null entry.
After applying $\rho$ to this table we obtain the transformed database $AP_t(P)$.
The action of $\rho$ can be described as swapping the data in the column $Q(t_i)$,
under the constraint that the content in the record $p$ can be replaced by the content in the record $q$ only if $p\in N(q)$.
Disclosure control methods of this type are called data swapping (first appearance in~\cite{DaleniusReiss}).
Observe that $\rho$ also adds some noise to the time stamps of the queries.
For example, the fact that $p$ posts first $Q_1$ and then $Q_2$ to the community of users, does not imply that $Q_1$ is posted before $Q_2$ to the server.
Therefore, in order for the swapping to preserve columns, we should think of the queries in $AP_t(P)$ as sorted according to the time they are posted to the community of users, not according to the time they are posted to the server.
\section{Attacking P2P UPIR}
\label{sec:compprot}
In this section we will discuss attacks on P2P UPIR and some countermeasures.
The purpose with the P2P UPIR protocol is to protect the privacy of the user when retrieving information from a server.
Therefore our main concerns are attacks from the server, or adversaries that have characteristics similar to the server.
We will also briefly consider attacks from other users.
\subsection{Neighborhood attacks on P2P UPIR 1}
\label{sec:attack1}
In the P2P UPIR 1 protocol the user forwards to the server only queries from collinear users different from himself.
Consider a community of users implementing the P2P UPIR 1 protocol and suppose that the initialization protocol is given a combinatorial configuration such that there are points with unique neighborhoods.
That is, there are points $p\in P$ such that $N(q)\neq N(p)$ for every $q\in P$, $q\neq p$.
The users are mapped to the points in the combinatorial configuration, and a user $p$ will share communication spaces with the set of users $N(p)$,
so that the users who post the queries in $RP(p)$ are the users in $N(p)$.
Now suppose that the user $p$ repeats the same query over and over again.
After a while, the probability that all users in $N(p)$ have posted the query will be high.
Therefore, since we know that $p$ is the only user with the neighborhood $N(p)$, if the query is rare, then we will be able to link the query to the user $p$, and so the anonymity provided by the protocol is broken.
The article \cite{StBr_PAIS2011} discussed the fact that it is very common that users of web-based search engines post the same or a slightly modified version of the same query several times.
Other references on this subject are \cite{SpWoJaSa,Teevan}.
Examples of combinatorial configurations with unique neighborhoods
are provided in Section~\ref{sec:unique_neighborhoods} and combinatorial configurations with $n$-anonymous neighborhoods are discussed in Section~\ref{sec:combconfnanneighbors}.
\subsection{Adjusting query self-submission for P2P UPIR 2}
\label{sec:modification}
We just saw that the P2P UPIR 1 protocol,
which is similar to the version of the P2P UPIR protocol that appears in \cite{DoBr,DoBrWuMa},
can be attacked if the configuration that is used has points with unique neighborhoods.
Examples of combinatorial configurations with unique neighborhoods are the linear spaces (see Section~\ref{sec:unique_neighborhoods}).
This is a problem, since otherwise the linear spaces are optimal configurations for P2P UPIR,
if we consider the anonymity of the user in front of the server.
The use of a linear space maximizes the number of apparent profiles into which the real profile of a user is diffused,
under the restriction that we keep the cardinality of the user community fixed.
In particular, a linear space is the only type of combinatorial configuration in which,
for all points $p$, the point set satisfies $P=N(p)\cup\{p\}=CN(p).$
We want to modify the protocol so that the use of linear spaces resists the attack described in the previous section.
A first approach is to let the user $p$ forward also his own queries. In this way he will forward the queries from $CN(p)$.
However, this implies that the users will forward to the server more of their own queries than queries of other users.
Indeed, if the user $p$ for every line $l$ selects a point $p'$ on $l$ with equal probability,
then $p$ will select $p'\neq p$ with probability $\frac{1}{rk}$, and himself with probability $\frac{1}{k}>\frac{1}{rk}$.
The server can therefore infer the real profile of a user from his apparent profile.
There will be partial protection of the privacy of the user in front of the server.
But if we let the protocol run for a while in order to let the user post enough queries,
then a user's real profile will be inferable from his apparent profile.
A compromise between these two extremes is to let the user adjust the proportion of self-submission of queries so that his real profile results uniformly distributed over the apparent profiles of the users in $CN(p)$.
This is the strategy employed by the P2P UPIR 2 protocol, as will be illustrated below.
\begin{definition}
Let $p_0$ be a user in a P2P UPIR community.
We say that $p_0$'s real query profile is uniformly and independently distributed over the apparent query profiles of a set of users $A$, if, for all queries $Q\in RP(p_0)$ and for all users $p\in A$, the events ``$p$ forwards $Q$ to the server'', have equal probability and are mutually independent.
\end{definition}
A user $p_0$ in a community of users who are executing the P2P UPIR 1 protocol from the P2P UPIR INIT protocol, selects the proxy for every query uniformly at random from $N(p_0)$, and the choices are independent.
It is therefore clear that $RP(p_0)$ is uniformly and independently distributed over the apparent query profiles of $N(p_0)$.
We will now see that we can adjust the self-submission in P2P UPIR 2 and achieve a uniform and independent distribution of $RP(p_0)$ over the apparent query profiles of $CN(p_0)$.
\begin{proposition}
\label{prop:selfsubmission}
Let $p_0$ be a user in a P2P UPIR 2 community.
Then $p_0$'s real query profile is uniformly and independently distributed over the apparent query profiles of $CN(p_0)$, if and only if $p_0$'s probability of query self-submission is $\frac{1}{|CN(p_0)|}=\frac{1}{r(k-1)+1}$.
\end{proposition}
\begin{proof}
The set of users who forward $RP(p_0)$ to the server is $CN(p_0)$.
It is clear that $RP(p_0)$ is uniformly distributed over $CN(p_0)$ if the probability for any user in $CN(p_0)$ to forward any of $p_0$'s queries is $1/|CN(p_0)|$.
In particular this implies that for $RP(p_0)$ to be uniformly distributed over $CN(p_0)$, $p_0$ should have self-submission probability $1/|CN(p_0)|$.
We have $|CN(p)|=r(k-1)+1$ for all $p$.
We will now see that, for $RP(p_0)$ to be uniformly distributed over $CN(p_0)$,
it is sufficient that $p_0$ has self-submission probability $1/(r(k-1)+1)$.
Suppose $p_0$ has self-submission probability $1/(r(k-1)+1)$.
Let $Q$ be a query in $RP(p_0)$.
The probability that $Q$ is posted to the community is $1-\frac{1}{r(k-1)+1}$.
The queries in $RP(p_0)$ are distributed by $p_0$ over his communication spaces following a uniform distribution,
so the probability that $Q$ is posted to the communication space $l$ is $\frac{1}{r}\left(1-\frac{1}{r(k-1)+1}\right)=\frac{k-1}{r(k-1)+1}$.
There are $k-1$ other users than $p_0$ connected to $l$, and they are selected using a uniform distribution,
so the probability that any particular user $p\in N(p_0)$ will read and forward $Q$ is
$\frac{1}{k-1}\frac{k-1}{r(k-1)+1}=\frac{1}{r(k-1)+1}=\frac{1}{|CN(p_0)|},$
which equals the probability that $p_0$ forwards $Q$.
The choices of communication space and user are independent, so we conclude that for every query $Q$ that $p_0$ posts to the community of users, the events ``$p$ forwards $Q$ to the server'' have equal probability for all $p\in CN(p_0)$ and that the choices of $p$ are all mutually independent.
\end{proof}
From now on, we will always assume that the P2P UPIR 2 protocol is implemented with self-submission $1/|CN(p)|$, as indicated by Proposition~\ref{prop:selfsubmission}.
\subsection{Closed neighborhood attacks on P2P UPIR 2}
\label{sec:attack2}
The P2P UPIR 2 protocol with self-submission $1/|CN(p)|$ avoids the attack described in Section~\ref{sec:attack1}, when the configuration that is used is a linear space.
However, in general, for other combinatorial configurations, the P2P UPIR 2 protocol also presents weaknesses in case of repeated queries.
The real query profile of $p$ is independently and uniformly distributed over the apparent profiles of $CN(p)$.
If $p$ repeats a rare query enough, then this query can be linked to him whenever the set $CN(p)$ can be.
Examples of combinatorial configurations with unique closed neighborhoods
are provided in Section~\ref{sec:unique_neighborhoods} and we discuss combinatorial configurations with $n$-anonymous closed neighborhoods in Section~\ref{sec:nanonymousclosedneighborhoods}.
\subsection{Other attacks}
\label{sec:attack_altres}
Swanson and Stinson described an attack on the P2P UPIR 2 protocol that was based on the intersection of closed neighborhoods~\cite{SwansonStinson}.
Following \cite{StBr_PAIS2011}, they also use a repeated rare query or variation of query, say $Q$.
Instead of focusing on the closed neighborhood of the real owner of $Q$, their concern is the closed neighborhood of the proxy.
The attack consists in intersecting the closed neighborhoods of the users who act as proxy for the query $Q$.
The result of the attack is a set of users containing the anonymity set of the origin of the query.
If this set is small, we have reidentification.
It is clear that there is an analogous attack on the P2P UPIR 1 protocol, intersecting the neighborhoods of the proxies.
These attacks are performed by a curious server, just as the attacks in Section~\ref{sec:attack1} and \ref{sec:attack2}.
It is easy to see that an intersection attack can take place if and only if the configuration that is used in the protocol has unique neighborhoods or closed neighborhoods, respectively.
Therefore, the two types of attacks are essentially the same.
As observed by Swanson and Stinson, we can also consider intersection attacks in which the adversary is a user in the community.
In this case $m$ proxies collude in order to find the origin of a sequence of $l$ linked queries.
Swanson and Stinson use other incidence structures than configurations,
where two points may appear together in more than one block.
In this case, the proxies can intersect the blocks over which they received $Q$.
If we use combinatorial configurations, this does not occur, so this is a strong reason for using combinatorial configurations in P2P UPIR.
Below we briefly list other possible attacks:
\begin{itemize}
\item The adversary can reveal the underlying combinatorial structure, by introducing users owned by him in the community. This attack was briefly discussed in \cite{StBr_CRM};
\item The adversary can determine who is in the community, since these users will have very similar apparent query profiles, and this profile will differ from apparent query profiles of users outside the community.
\end{itemize}
\subsection{Discussion}
We want to point out that although P2P UPIR 2 allows for the use of linear spaces without risk for intersection attacks,
and the linear spaces have neighborhoods of maximal cardinality, the original P2P UPIR 1 protocol is still slightly simpler in implementation.
This would be even more so, if the self-submission was expressed as a proportion of the query profile.
Because of its simplicity, the use of the P2P UPIR 1 protocol is still justified,
if anonymity can be ensured.
\section{On the privacy provided by P2P UPIR}
\label{sec:privP2PUPIR}
In this section we specify the type of privacy that can be attained using P2P UPIR.
We also show which combinatorial configurations to use in order to attain this privacy.
\subsection{n-Confusion for P2P UPIR}
We will use the notations on database disclosure control, introduced in Section~\ref{sec:notations}.
As commented there, there is no interest in preserving the utility of the database in the transformation.
We are only interested in minimizing the disclosure risk.
The best result would therefore be a protected database completely free from information.
The P2P UPIR protocol cannot achieve this, as single queries contain information and are indivisible.
The purpose of P2P UPIR is to cause confusion on who is the real sender of the query.
It is useful to have a measure of the provided confusion.
\begin{definition}
If the cardinality of the anonymity set for the owner of any sequence of linked queries (or query) is at least $n$,
then we say that we have \emph{$n$-confusion.}
In this case we say that we have \emph{$n$-confusing} P2P UPIR.
\end{definition}
If it is known who is in the community of users $P$,
then the confusion on who is the sender of a query (i.e. the cardinality of the anonymity set),
cannot be larger than the cardinality of the set of users.
In Section~\ref{sec:attack_altres} we saw that the server can see who is in the community,
since these users will have similar apparent query profiles.
Therefore, the best we can aspire to is a confusion of magnitude $n=|P|$ on who is the owner of a query.
In general, we want to cause confusion on who is the owner of a sequence of queries, also when the sequence is linkable by content.
Also in this case, the obvious upper bound for the confusion is $n\leq |P|$.
We are interested in achieving $n$-confusion also for $n<|P|$,
if this can be justified by other advantages, for example, as in this article, if it permits us to use the simpler P2P UPIR 1 protocol, instead of the slightly more complicated P2P UPIR 2 protocol.
In the following example we see that $n$-confusion with $n>1$ can fail to be achieved by P2P UPIR, if the sequence of linked queries contains a quasi-identifier.
Therefore, in this case, the owner of the query sequence will not be anonymous.
\begin{example}
\label{ex:nonanonymity}
Consider a sequence $s$ of queries posted by a user $p$ that is linkable by content.
Observe that this does not imply that $s$ is linkable to $p$.
Suppose that the content of the queries in $s$ gives information for linking $s$ to $p$.
Then the anonymity set of $s$ has cardinality one, so P2P UPIR cannot provide anonymity for $p$ with respect to $s$.
\end{example}
In the following we will always assume that $AP_t(P)$ does not contain sequences of linkable queries with quasi-identifiers.
Under this assumption, the privacy provided by P2P UPIR in case of sequences of linked queries is anonymity; we can link the queries but we cannot link them to their owner.
The presence of sequences of queries that are linkable because of their content, obstructs unlinkability in P2P UPIR.
However, if we assume that the adversary cannot use the query content for the analysis,
then unlinkability can be provided for the queries.
This situation may occur, for example, if the range of possible queries is small.
More precisely, it occurs if there are no rare repeated queries.
Either we have anonymity or we do not.
Anonymity is therefore provided by P2P UPIR if the protocol satisfies $n$-confusion with $n>1.$
The anonymity of a user can be broken by a collusion of the $n-1$ other users in his anonymity set.
Therefore it is interesting to maximize $n$.
In the previous discussion we have always assumed that the information that is available to the adversary is the same information that is available to the server.
We have anonymity also with respect to other users if the identity of the owner of a query, or a sequence of queries, is known only to this user.
This occurs if traffic analysis is prohibited,
there are no linked sequence of queries with quasi-identifier,
and the number of users $k$ on every communication space is large.
If $k=2$ then one of the two users knows with certainty who is the owner of the query.
In general, a collusion of $k-1$ users is needed to deduce the identity of the query owner.
Therefore it is interesting to maximize $k$.
On the other hand, if we assume that the adversary user can see the identity of the query owner, using for example traffic analysis, or if there are linked sequences with quasi-identifier,
then it is interesting to break up the real query profiles in small parts, in order to provide some confidentiality.
In this case it is therefore interesting to maximize $r$, the number of communication spaces per user.
\subsection{n-Anonymity for P2P UPIR}
It is clear that the use of P2P UPIR does not imply that the resulting database $AP_t(P)$ is $n$-anonymous in the sense of Definition~\ref{1:def:nan}.
Indeed, $AP_t(P)$ will not in general have $n$ occurrences in $n$ different records of any sensitive sequence $s$.
The attacks in Sections~\ref{sec:attack1} and \ref{sec:attack2} suggest that there is a quasi-identifier present in $AP_t(P)$.
For P2P UPIR 1 and P2P UPIR 2 this quasi-identifier is the set of neighborhoods and the set of closed neighborhoods, respectively.
Formally, before transforming the database $RP_t(P)$ using the P2P UPIR protocol transformation $\rho$,
we first add the attribute $N(p)$ (or $CN(p)$) of $p\in P$ to $RP_t(P)$.
The attribute $N(P)$ (resp. $CN(P)$) is invariant for the action of $\rho$,
which in particular means that $\rho$ preserves its quasi-identifying property.
According to Definition \ref{1:def:nan},
in order to make $AP_t(P)$ $n$-anonymous with respect to this quasi-identifier,
we have to ensure that every element of the set of neighborhoods (resp. the set of closed neighborhoods) occurs at least $n$ times in $AP_t(P)$.
\begin{definition}\label{def:nanconf1}
We say that a combinatorial configuration has $n$-\emph{anonymous neighborhoods} (resp. $n$-\emph{anonymous closed neighborhoods}),
if every neighborhood (resp. closed neighborhood) of a point,
is the neighborhood (resp. closed neighborhood) of at least $n$ points.
\end{definition}
\begin{figure}
\begin{tabular}{ccc}
\resizebox{!}{0.22\textwidth}{\includegraphics{pp2_veins.eps}}
&
\resizebox{!}{0.22\textwidth}{\includegraphics{pp3_veins.eps}}
&
\resizebox{!}{0.22\textwidth}{\includegraphics{pappus_veins.eps}}
\\
The neighborhood
&
The neighborhood
&
The neighborhood\\
of a point in & of a point in & of 3 points in \\
$\mathbb{P}(\mathbb{F}_2)$, the Fano plane& $\mathbb{P}(\mathbb{F}_3)$& the Pappus configuration
\end{tabular}
\end{figure}
\subsection{Achieving n-confusion using n-anonymous neighborhoods and n-anonymous closed neighborhoods}
\label{sec:nconfnan}
In this section we show that the P2P UPIR protocol can offer $n$-confusion, if we use the correct type of combinatorial configuration.
\begin{proposition}
\label{prop:p2pnconf}
Under the assumption that queries can be linked by content, but there are no sequences of linked queries containing quasi-identifiers, the P2P UPIR 1 protocol (resp. the P2P UPIR 2 protocol) provides $n$-confusion if it is implemented with a combinatorial configuration with $n$-anonymous neighborhoods (resp. $n$-anonymous closed neighborhoods).
However, if the sequence of queries cannot be linked by content, then we have $n$-confusion with $n$ equal to the cardinality of the neighborhoods and the closed neighborhoods, respectively.
\end{proposition}
\begin{proof}
We will prove the result for P2P UPIR 1. The proof for P2P UPIR 2 is analogous.
Following the notation in Section~\ref{sec:disclosurecontrolp2pupir}, we apply the P2P UPIR 1 protocol $\rho$ to the database $RP_t(P)$ and obtain $AP_t(P)$.
Suppose that the adversary is allowed to analyze the content of the queries and is able to correctly link a sequence $s$ of queries to each other, as having the same origin, say, the user $p_0$.
We have assumed that $s$ does not contain a quasi-identifier, which could identify $p_0$ as the origin of $s$, by content alone.
Because of the properties of $\rho$, all queries in $s$ will be in the records of $AP_t(P)$ that correspond to $N(p_0)$.
The anonymity set of $s$ is the intersection of the neighborhoods of the neighbors of user $p_0$, that is, $\bigcap_{p\in N(p_0)}N(p).$
If the combinatorial configuration has $n$-anonymous neighborhoods, then this intersection has cardinality at least $n$, so we have $n$-confusion.
If query sequences cannot be linked by content,
then the anonymity set for the owner of a single query is the neighborhood of the proxy of the query.
This anonymity set has cardinality $r(k-1)$, so in this case we have $n$-confusion with $n=r(k-1)$.
\end{proof}
\section{Combinatorial configurations with unique neighborhoods or unique closed neighborhoods}
\label{sec:unique_neighborhoods}
In this section we give examples of combinatorial configurations that have unique neighborhoods or unique closed neighborhoods for all points.
We saw in Section~\ref{sec:compprot} that such configurations should be avoided for the use in P2P UPIR~1 and P2P UPIR~2 respectively.
We provide examples of combinatorial configurations with
\begin{itemize}
\item anonymous neighborhoods but unique closed neighborhoods (as the combinatorial configurations with deficiency one),
\item unique neighborhoods but anonymous closed neighborhoods (as the linear spaces), and
\item unique neighborhoods and unique closed neighborhoods (as the pentagonal geometries without opposite line pairs).
\end{itemize}
\subsection{Examples of combinatorial configurations with unique neighborhoods}
In this section we give examples of combinatorial configurations with unique neighborhoods.
\begin{proposition}
The linear spaces have unique neighborhoods.
\end{proposition}
\begin{proof}
In a regular linear space every pair of points is collinear.
Therefore, for any point $p$, the neighborhood $N(p)$ consists of all points of the point set except $p$, so that $p$ is the only point with neighborhood $N(p)$.
\end{proof}
A \emph{triangle} in a combinatorial configuration is a set of three distinct points such that they are pairwise collinear on three distinct lines.
A combinatorial configuration is \emph{triangle-free} if it has no triangles.
\begin{proposition}\label{2.1:thm:id_conf}
A triangle-free combinatorial configuration, not a graph, has unique neighborhoods.
\end{proposition}
\begin{proof}
Let $C=(P,L,I)$ be a triangle-free $(r,k)$-configuration with $k>2$ (so that it is not a graph).
Fix a point $p_0\in P$ and let $p_1, p_2\in N(p_0)$ be two points collinear with $p_0$.
Let $p_3\in P$ be a point such that $N(p_0)=N(p_3)$.
Then $p_3$ is collinear with $p_1$ and $p_2$, but not with $p_0$,
so that there is no line through all the four points $p_0$, $p_1$, $p_2$ and $p_3$.
Therefore, $p_1$ and $p_2$ can not be collinear, because if they were, then at least one of the triples $p_0$, $p_1$, $p_2$ or $p_1$, $p_2$, $p_3$ would form a triangle.
In other words, no pair of points in $N(p_0)=N(p_3)$ is collinear.
Therefore the number of points on every line in $C$ is $k=2$, because if $k>2$, then there would be at least one pair of collinear points $p,q\in N(p_0)=N(p_3)$.
We deduce that, whenever $k>2$, given a point $p_0\in P$ there is no point $p_3\in P$ distinct from $p_0$ such that $N(p_0)=N(p_3)$.
\end{proof}
A \emph{pentagonal geometry} is a combinatorial configuration in which, for any point $p$, all points not in the closed neighborhood of $p$ are on the same line~\cite{BaBaDeSt}.
This line is called the \emph{opposite line} $p^{opp}$ of $p$.
\begin{proposition}
A pentagonal geometry has unique neighborhoods.
\end{proposition}
\begin{proof}
Let $p\neq q$ be two points in the pentagonal geometry.
Suppose that $q$ is not on $p^{opp}$. Then $p$ and $q$ are collinear, so $q\in N(p)$. But $q\not\in N(q)$, and we deduce that $N(p)\neq N(q)$.
Now suppose that $q$ is on $p^{opp}$. We have that $p$ is not on $p^{opp}$, so $p^{opp}\neq q^{opp}$.
Let $x\neq q$ be a point on $p^{opp}$. Then $x\in N(q)$, but $x\not\in N(p)$, so $N(p)\neq N(q)$.
\end{proof}
\subsection{Combinatorial configurations with unique closed neighborhoods}
In this section we give examples of combinatorial configurations with unique closed neighborhoods.
The parameters of any combinatorial $(v,b,r,k)$-configuration satisfy the inequality $v\geq r(k-1)+1$.
We have the equality $v=r(k-1)+1$ if and only if we have a linear space.
The \emph{deficiency} of a combinatorial $(v,b,r,k)$-configuration is the number $v-(r(k-1)+1)$.
\begin{proposition}
\label{prop:def1}
A combinatorial configuration with deficiency one has unique closed neighborhoods.
\end{proposition}
\begin{proof}
In a combinatorial configuration with deficiency one, for any point $p$ there is only one point in the complement of $CN(p)$, the \emph{anti-podal point} of $p$.
The anti-podal points come in pairs, so every point has a unique anti-podal point and therefore, a unique closed neighborhood.
\end{proof}
From the proof of Proposition~\ref{prop:def1} we also deduce that combinatorial configurations with deficiency one have 2-anonymous neighborhoods; any point and its anti-podal point share neighborhood.
It can be proved that if two points $p$ and $q$ in a pentagonal geometry share the same opposite line $l$,
then all points in $l$ will have the same opposite line: the line spanned by $p$ and $q$.
Such a pair of lines is called an opposite line pair.
\begin{proposition}
A pentagonal geometry with no pair of opposite lines has unique closed neighborhoods.
\end{proposition}
\begin{proof}
For any point $p$ in the pentagonal geometry, the set of points on $p^{opp}$ is the complement of $CN(p)$.
If the pentagonal geometry has no opposite line pair, then all points have unique opposite lines, hence unique closed neighborhoods.
\end{proof}
\section{Combinatorial configurations with n-anonymous neighborhoods}
\label{sec:combconfnanneighbors}
In Section~\ref{sec:nconfnan} we saw that combinatorial configurations with $n$-anonymous neighborhoods are interesting for use with P2P UPIR 1.
\subsection{Examples of combinatorial configurations with \\n-anonymous neighborhoods}
Here we give an important example of a family of combinatorial configurations with $n$-anonymous neighborhoods.
\begin{proposition}\label{2.3:thm:transnan}
A transversal design $TD(k,n)$ has $n$-anonymous neighborhoods.
\end{proposition}
\begin{proof}
The point set of the transversal design can be partitioned into $k$ groups of cardinality $n$, such that the points in the same group are not collinear.
Any pair of points from different groups is contained in exactly one line.
This implies that the $n$ points in the same group all have the same neighborhood.
\end{proof}
The transversal design $TD(k,n)$ in this construction is a combinatorial $(nk,n^2,n,k)$-configuration.
Hence the construction provides a combinatorial configuration with $n$-anonymous neighborhoods that is suitable for $nk$ P2P UPIR users and requires the use of $n^2$ communication spaces.
As we saw in Section~\ref{sec:transversaldesigns}, transversal designs can be constructed using Latin squares, and many transversal designs can be easily constructed using affine planes.
A transversal design constructed from an affine plane of order $q$ has parameters $(q^2,q^2,q,q)$.
The use of the affine plane of order $2$ gives an ordinary square with 4 points and 4 lines with 2 points on every line.
The use of the affine plane of order 3 gives the Pappus configuration.
\subsection{A characterization of the combinatorial configurations with n-anonymous neighborhoods}
We will now characterize the combinatorial configurations with $n$-anonymous neighborhoods exactly.
\begin{proposition}\label{thm:charnanonym}
A combinatorial $(v,b,r,k)$-configuration with $n$-anonymous neighborhoods is a combinatorial configuration that satisfies the following conditions:
\begin{itemize}
\item There exists a partition $G=\{g_i\}_{i=1}^{m}$ of the point set such that the points in the same part are not collinear, and $|g_i|\geq n$ for all $i\in [1,\dots,m]$;
\item We have that $r\geq n$ and $m\geq k$.
\end{itemize}
\end{proposition}
\begin{proof}
Let $C=(P,L,I)$ be a combinatorial configuration with $n$-anonymous neighborhoods.
Then every point $p\in P$ shares its neighborhood $N(p)$ with $n-1$ other points.
``Having the same neighborhood'' is a binary relation which is obviously
\begin{itemize}
\item reflexive ($p$ has the same neighborhood as $p$);
\item symmetric (if $N(p)=N(q)$ then $N(q)=N(p)$);
\item transitive (if $N(p_1)=N(p_2)$ and $N(p_2)=N(p_3)$, then \\$N(p_1)=N(p_3)$).
\end{itemize}
So it is an equivalence relation and defines a partition $G=\{g_1,\dots,g_m\}$ of the point set, in which $|g_i|\geq n$ for all $g_i\in G$.
We will call the parts $g_i\in G$ groups.
The neighborhood $N(p)$ of the point $p$ is defined as the set of points that are collinear with $p$, and different from $p$.
In particular, if two points $p$ and $q$ satisfy $N(p)=N(q)$,
then they are not collinear, since if they were, then $p\in N(q)$ which would imply $p\in N(p)$.
Therefore points in the same group are not collinear.
For the bound on $r$, consider a pair of collinear points $p$ and $q$.
Let $g$ be the group containing $p$.
All points in $g$ have the same neighborhood, so $q\in N(p')$ for every $p'\in g$.
No line contains two points in $g$, and we deduce that there are at least $|g|\geq n$ lines through $q$, so that $r\geq n$.
Regarding the number of points on every line $k$, we see that, since points in the same group are not collinear, it is clear that any line contains $k$ distinct points from $k$ distinct parts of $G$, so that $k\leq m$.
\end{proof}
There are, indeed, $n$-anonymous combinatorial configurations which are not transversal designs.
\begin{example}
\label{ex:3an}
Consider the combinatorial $(36,72,6,3)$-configuration with point set $P=\{1,\dots,36\}$ and line set as in Table~\ref{table:conf}.
\begin{table}
\caption{Line set of the combinatorial $(36,72,6,3)$-configuration in Example~\ref{ex:3an}.}
\label{table:conf}
\begin{center}
\begin{tabular}{llll}
$\begin{array}{c}
\{\{1,4,7\},\\
\{1,5,8\},\\
\{1,6,9\},\\
\{2,4,8\},\\
\{2,5,9\},\\
\{2,6,7\},\\
\{3,4,9\},\\
\{3,5,7\},\\
\{3,6,8\},\\
\{1,10,13\},\\
\{1,11,14\},\\
\{1,12,15\},\\
\{2,10,14\},\\
\{2,11,15\},\\
\{2,12,13\},\\
\{3,10,15\},\\
\{3,11,13\},\\
\{3,12,14\},\end{array}$
&
$\begin{array}{c}
\{4,16,19\},\\
\{4,17,20\},\\
\{4,18,21\},\\
\{5,16,20\},\\
\{5,17,21\},\\
\{5,18,19\},\\
\{6,16,21\},\\
\{6,17,19\},\\
\{6,18,20\},\\
\{7,22,25\},\\
\{7,23,26\},\\
\{7,24,27\},\\
\{8,22,26\},\\
\{8,23,27\},\\
\{8,24,25\},\\
\{9,22,27\},\\
\{9,23,25\},\\
\{9,24,26\},\end{array}$
&
$\begin{array}{c}
\{10,28,31\},\\
\{10,29,32\},\\
\{10,30,33\},\\
\{11,28,32\},\\
\{11,29,33\},\\
\{11,30,31\},\\
\{12,28,33\},\\
\{12,29,31\},\\
\{12,30,32\},\\
\{13,16,34\},\\
\{13,17,35\},\\
\{13,18,36\},\\
\{14,16,35\},\\
\{14,17,36\},\\
\{14,18,34\},\\
\{15,16,36\},\\
\{15,17,34\},\\
\{15,18,35\},\end{array}$
&
$\begin{array}{c}
\{19,22,31\},\\
\{19,23,32\},\\
\{19,24,33\},\\
\{20,22,32\},\\
\{20,23,33\},\\
\{20,24,31\},\\
\{21,22,33\},\\
\{21,23,31\},\\
\{21,24,32\},\\
\{25,28,34\},\\
\{25,29,35\},\\
\{25,30,36\},\\
\{26,28,35\},\\
\{26,29,36\},\\
\{26,30,34\},\\
\{27,28,36\},\\
\{27,29,34\},\\
\{27,30,35\}\}\end{array}$
\end{tabular}
\end{center}
\end{table}
It is clear that this combinatorial $(36,72,6,3)$-configuration is $3$-anonymous, but $k=3<12=m$ and $r=6>3=n$.
We also observe that $rk=18$ divides $v=36$ and $b=72$.
The groups in the partition are given by Table~\ref{table:groups}.
\begin{table}
\caption{The partition of the point set into anonymity sets of the combinatorial $(36,72,6,3)$-configuration with 3-anonymous neighborhoods in Example~\ref{ex:3an}.}
\label{table:groups}
\begin{center}
\begin{tabular}{ccccc}
$\{\{1,2,3\},$&$\{4,5,6\},$&$\{7,8,9\},$&$\{10,11,12\},$\\
$\{13,14,15\},$&$\{16,17,18\},$&$\{19,20,21\},$&$\{22,23,24\},$\\
$\{25,26,27\},$&$\{28,29,30\},$&$\{31,32,33\},$&$\{34,35,36\}\}$
\end{tabular}\end{center}
\end{table}
\end{example}
\subsection{Optimal configurations for $n$-anonymous P2P UPIR 1}
The privacy provided to the users of $n$-anonymous P2P UPIR 1 is $n$-confusion, where $n$ is the cardinality of the anonymity sets.
The points in the same anonymity set have the same neighborhood.
As we saw in Proposition~\ref{prop:p2pnconf}, if query sequences cannot be linked by content, then we have $r(k-1)$-confusion, since this is the cardinality of the neighborhood of the proxy of a single query.
It is therefore interesting to maximize both $n$ and $r(k-1)$, although which one is the most important may depend on the context.
In a combinatorial configuration with $n$-anonymous neighborhoods, the anonymity set and the neighborhood of a point are disjoint.
Therefore, maximizing $n$ and $r(k-1)$ simultaneously is the same as requiring $v=n+r(k-1)$, so that the anonymity set and the neighborhood together form the entire point set of the configuration.
It is easy to see that a transversal design satisfies this condition,
since a point $p$ in a transversal design is neighbor with all points that are not in the anonymity set of $p$.
Indeed, if, in Proposition~\ref{thm:charnanonym},
we add a restriction on the regularity of the group cardinalities and maximize $n$, then what we get are exactly the transversal designs.
\begin{theorem}
In a combinatorial $(v,b,r,k)$-configuration $C$ with $n$-anonymous neighborhoods and anonymity partition $G=\{g_i\}_{i=1}^m$ and $|g_i|=n$ for all $i\in [1,\dots,m]$,
we have that
\begin{center}$r=n$ if and only if $m=k$.\end{center}
In this case $C$ is a transversal design $TD(k,n)$ and $v=kn$, $b=n^2$.
\end{theorem}
\begin{proof}
Since the configuration is connected, if $r=n$, then necessarily $k=m$.
On the other hand, if $k=m$, then necessarily $r=n$: fix one part $g\in G$ and a point $p\in g$.
A line through $p$ has $k$ points in $k=m$ distinct parts of $G$, so the line has one point in every part of $G$.
For any part $g'\in G$ different from $g$ there is also a total of $n$ lines through $p$.
Since these lines have one point in every part of $G$, we get $r=n$.
A transversal design is a uniform group divisible design in which the number of groups $|G|$ equals the length of the blocks $k$.
We have seen that an $n$-anonymous combinatorial $(v,b,n,m)$-configuration such that $|g_i|=n$ and $m=k$ satisfies exactly these conditions,
so it is a transversal design $TD(k,n)$.
\end{proof}
\section{Combinatorial configurations with n-anonymous closed neighborhoods}
\label{sec:nanonymousclosedneighborhoods}
In Section~\ref{sec:nconfnan} we saw that combinatorial configurations with $n$-anonymous closed neighborhoods are interesting for P2P UPIR 2.
By now, the reader is already familiar with the most important example of combinatorial configurations with $n$-anonymous closed neighborhoods.
\begin{proposition}
\label{prop:lin_anon}
A linear space on $n$ points has $n$-anonymous closed neighborhoods.
\end{proposition}
\begin{proof}
In a linear space every pair of points is collinear.
\end{proof}
\subsection{Combinatorial configurations with n-anonymous closed neighborhoods from combinatorial configurations with n-anonymous neighborhoods}
\label{sec:suboptimalnanonym}
After all, there is not much difference between the definition of $n$-anonymous neighborhoods and the definition of $n$-anonymous closed neighborhoods.
The next Theorem~\ref{thm:nanonym(III)from(I)} shows that we can use combinatorial configurations with the former property to construct combinatorial configurations with the latter property.
\begin{theorem}\label{thm:nanonym(III)from(I)}
Let $C$ be a combinatorial $(v,b,r,k)$-configuration with $n$-anonymous neighborhoods such that $k|n$.
Then there also exists a combinatorial $(v,b+n,r+1,k)$-configuration $C'$ with $n$-anonymous closed neighborhoods.
\end{theorem}
\begin{proof}
Let $C$ be as stated above. Then every point shares neighborhood with exactly $n$ more points.
Proposition~\ref{thm:charnanonym} implies that in $C$ there is a partition $G$ of the point set such that the points in the same part are exactly the points with the same neighborhood.
This implies that points in the same part are not collinear.
Define $C'$ by adding $k\frac{n}{k}=n$ new lines,
so that every new line contains only points from the same part of $G$.
Let $A$ be a set of points with the same neighborhood in $C$.
For any $p\in A$ there are $k-1$ other points $p_1,\ldots,p_{k-1}$ in $A$
collinear with $p$ by one of the new lines, such that $CN(p)=CN(p_i)$ for $i=1,\ldots,k-1$.
This concludes the proof.
\end{proof}
As a corollary of Theorem~\ref{thm:nanonym(III)from(I)} we get that an affine plane of order $k$ is a combinatorial configuration with $k$-anonymous closed neighborhoods.
Just apply the construction in the proof of Theorem~\ref{thm:nanonym(III)from(I)} to a transversal design $TD(k,k)$.
But an affine plane of order $k$ is a linear space on $v=k^2$ points,
so we already know from Proposition~\ref{prop:lin_anon} that it is a $k^2$-anonymous combinatorial configuration for P2P UPIR 2.
Indeed, $n$-anonymity implies $m$-anonymity for all $m\leq n$.
Observe though that in general the combinatorial $(r,k)$-configuration constructed in Theorem~\ref{thm:nanonym(III)from(I)} is $k$-anonymous but not $m$-anonymous for $m>k$.
Not all combinatorial configurations with $n$-anonymous closed neighborhoods can be obtained using the construction in Theorem~\ref{thm:nanonym(III)from(I)}.
For example, we can not use this method to construct a finite projective plane.
\subsection{Optimal configurations for P2P UPIR 2}
The P2P UPIR 2 was designed to provide $n$-confusion with linear spaces. We have the following result.
\begin{theorem}\label{thm:optpp}
A regular linear space on $v$ points provides $n$-confusing P2P UPIR 2 with $n=v$.
This is optimal.
\end{theorem}
\begin{proof}
It is immediate that a linear space provides $n$-confusing P2P UPIR 2 with $n=v$.
Since the confusion can not be larger than the total number of points $v$ in the configuration, this is optimal.
\end{proof}
A linear space is a $(v,k,1)$-BIBD. More generally, in a $(v,k,\lambda)$-BIBD any two points are connected by $\lambda\geq 1$ lines, so also in this case we have optimal $n$-confusing P2P UPIR 2.
However, then the BIBD is not a combinatorial configuration.
In this article we have provided reasons that justify the use of combinatorial configurations for P2P UPIR.
As was observed in \cite{SwansonStinson}, other incidence structures are also interesting, in particular if it is assumed that colluding users can communicate over channels that are exterior to the protocol.
\section{Conclusions}
We have presented two different P2P UPIR protocols, P2P UPIR 1 in which the users do not self-submit and P2P UPIR 2 in which they do.
Then we described an attack on P2P UPIR 1, based on unique neighborhoods,
and adjusted the self-submission for P2P UPIR 2 in order to avoid neighborhood attacks on linear spaces.
We also showed that P2P UPIR 2 is still vulnerable to closed neighborhood attacks, if the closed neighborhoods in the combinatorial configurations are unique.
We gave examples of combinatorial configurations with unique neighborhoods and unique closed neighborhoods.
Then we presented the combinatorial configurations with $k$-anonymous neighborhoods and $k$-anonymous closed neighborhoods, respectively.
We characterized, as $n$-confusion, the privacy provided by a P2P UPIR protocol that uses one of the combinatorial configurations from these families.
Finally we studied the combinatorial configurations with $k$-anonymous neighborhoods and $k$-anonymous closed neighborhoods.
We distinguished the transversal designs and the linear spaces as optimal configurations for P2P UPIR from these two families, respectively.
We want to point out that there are two trivial ways to connect communication spaces and users; the all-to-all and the one-to-all distributions.
As combinatorial structures both can be interpreted as degenerated linear spaces, since every pair of points (users) share exactly one line (communication space).
Used in the P2P UPIR 2 protocol, they provide the same anonymity in front of the server as does a linear space with the same number of points.
However, with respect to other users, there is no anonymity in the all-to-all distribution and confidentiality is lost in the all-to-one distribution.
Consequently, the reason why non-degenerated combinatorial configurations are interesting for P2P UPIR 2, is because they offer some anonymity and confidentiality with respect to the other users in the community.
\section*{Acknowledgements}
The authors want to thank Maria Bras-Amor\'os, Douglas R. Stinson, Colleen Swanson, Vicen\c{c} Torra and the anonymous referees for useful discussions and suggestions.
Partial support by the Spanish MEC projects ARES (CONSOLIDER ~INGENIO ~2010 ~~CSD2007-00004) and RIPUP (TIN2009-11689) is acknowledged. The authors are with the UNESCO Chair in Data Privacy, but their views do not necessarily reflect those of UNESCO nor commit that organization.
\bibliographystyle{plain}
|
{
"timestamp": "2012-12-10T02:02:27",
"yymm": "1206",
"arxiv_id": "1206.5930",
"language": "en",
"url": "https://arxiv.org/abs/1206.5930"
}
|
\section{Introduction}
\citet{ma81,no84,kou93}; and \citet{apt98} have suggested the division
of gamma-ray bursts (GRBs) into two categories, either short or long,
according to their duration (at $\sim$\,2\,s). Many observations
demonstrate different properties of short and long bursts.
They have different redshift distributions \citep{ba06,os08}
and may have different celestial distributions \citep{bal98,bal99,mes00,lit01,mes03,vav08}.
At present there is a predominant opinion that they are physically different
phenomena \citep{nor01,bal03,fox05,kan11}.
There are also statistical indications of a third, ``intermediate", group.
The division of GRBs into three groups has been studied statistically over
different databases: BATSE \citep{ho98,mu98,bala01,ho02,ho06,cha07}; BeppoSAX \citep{ho09};
Swift \citep{ho08,hu09,ho10,ve10} and RHESSI \citep{rip09}.
These three groups may also have different celestial distributions \citep{mes00,vav08};
at least for the BATSE database.
No test has given a statistically significant support for
the existence of four or more groups.
Only the BATSE database gave a weak 6.2\,\% significance level
for such a possibility \citep{ho06}.
One cannot exclude an eventuality that the
separation of this third group
is simply a selection effect \citep{hak00,ra02}.
In other words, a separation from the statistical
point of view does not necessarily also indicate astrophysically different phenomena. In
principle, it is still possible that the class of the intermediate GRBs constitutes
a ``tail'' of either the short or the long group. The article by \citet{ve10} claims that
--- at least for the Swift database \citep{sak08} --- the third group is related to the so-called X-Ray
Flashes (XRFs), which need not be physically distinct phenomena \citep{kip03,so06}.
Two models of XRFs are favored; either they are ordinary long GRBs viewed slightly off-axis
\citep{zhg02} or they are intrinsically soft long-duration GRBs \citep{gen07}.
Hence, at least in the Swift database, the problem of the
intermediate class seems to have been solved.
However, for three reasons the situation has not yet been clarified.
First, with regard to the Swift database,
another study suggests that even the short group
should be further separated \citep{sak09}.
Secondly, there is additional observational evidence
against the simple scheme that maintains the existence of
{\it only} two types of bursts (short/hard and
long/soft) separated at duration of $\sim$2\,s.
The GRB060614 event, which is clearly long at duration
($\simeq$\,100\,s) but in any other properties resembles a short GRB,
and subsequent short bursts with soft extended emission yet challenged this
scheme \citep{ge06}. To avoid the limitations of a short-long separation
terminology, the designations
``Type I" and ``Type II" have been proposed \citep{zhg06,zhg09,kan11} because
duration alone is hardly sufficient for a correct division into categories.
Thirdly, it remains possible that in other databases the
discovered intermediate group is not represented by XRFs.
Concerning this third reason,
the mean duration of the intermediate group appears to vary according to
the database in which it is found.
For the Swift data \citep{ho08,hu09} the mean duration is
$\sim$\,12\,s, which resembles the durations of the long GRBs, but for the
RHESSI and BATSE data \citep{ho98,mu98,ho06,rip09} this mean is far below 10\,s.
It is clear that any new result in the classification scheme of
GRB groups is desirable. In this article we
study the RHESSI database, where in addition to \citet{rip09},
the spectral lags and peak-counts are also included.
We have two concrete aims here:
First, to provide further statistical tests concerning the GRB classes and, second,
to provide additional information concerning the physical significance
of the RHESSI intermediate group found by \citet{rip09}.
The paper is organized as follows:
In Sec.~\ref{sec:sample} the RHESSI satellite and its GRB data sample are described.
In Sec.~\ref{sec:KS} distributions of spectral lags, normalized lags, and peak-count rates
are studied using Kolmogorov-Smirnov and Anderson-Darling tests along with Monte Carlo simulations.
In Sec.~\ref{sec:dis} we discuss results of these tests, compare the results with the BATSE and
Swift data samples, and discuss the number of GRB groups using model-based and K-means clustering methods.
Sec.~\ref{sec:sum} summarizes results of this paper.
\section{The RHESSI data sample}
\label{sec:sample}
The Ramaty High Energy Solar Spectroscopic Imager
\footnote{\url{http://hesperia.gsfc.nasa.gov/hessi}}$^,$\footnote{\url{http://grb.web.psi.ch}}
(RHESSI) is a satellite designed for the observation of hard X-rays and gamma-rays from solar
flares \citep{lin02}, but it is also able to detect GRBs.
There is no automatic search routine for GRBs,
and the RHESSI data are searched for a GRB signal only if a message from another
instrument of the InterPlanetary Network (IPN) occurs. Therefore, our data set includes
only events confirmed by other satellites.
In this paper we study the same list of bursts which has been published in
\citet{rip09}. We consider 427 GRBs from the period between
February 14, 2002 and April 25, 2008.
In contrast to \citet{rip09}, the spectral lags and the peak-counts
--- calculated for the first time for RHESSI ---
are used in addition to the durations and the hardnesses.
They are collected, together with their uncertainties, in Table~\ref{tab:database}.
These new observational data allow further study of the questions concerning
the GRB classification. There are two arguments for the choice of the same list of bursts.
First, both in \citet{rip09} and in the present work, similar
statistical studies are performed. Hence, for comparison, it is reasonable to study
the structure of groups found in the RHESSI database over the same set.
The second argument concerns an instrumental effect.
The measurements of the hardness ratio of the events during the year 2008 and later have
been systematically affected by an ``annealing"
procedure\footnote{\url{http://hesperia.gsfc.nasa.gov/hessi/news/jan_16_08.htm}}
executed on the RHESSI detectors at late 2007 \citep{bel08}.
The reason why the RHESSI team decided to anneal the detectors was to recover its deteriorating
spectral sensitivity. However, the sensitivity at low energies had not been recovered as well as
at high energies and hence the measured GRB hardness ratios from the post-annealing period are
systematically shifted to higher values \citep{ver09,rip10}. In order to eliminate this instrumental
influence a more sophisticated modeling would be required.
However, this is beyond the scope of this article.
In order to compare the spectral lags and the peak-counts of bursts belonging to the
different groups, one must provide a rule by which the particular GRBs are sorted into the concrete
groups. We proceeded in the following manner: The probability density function
employed in the fitting of the duration-hardness plane in \citet{rip09} is composed through the summation
of three bivariate log-normal
functions $f(x,y)=f_1(x,y)+f_2(x,y)+f_3(x,y)$, where $x$ is the base 10 logarithm of the duration
and $y$ is the base 10 logarithm of the hardness ratio, and $f_1$, $f_2$, and $f_3$ are components
corresponding to the particular groups. A burst at the point $[x_0;y_0]$ is considered short, intermediate
or long depending on whether the $f_1(x_0,y_0)$, $f_2(x_0,y_0)$ or $f_3(x_0,y_0)$ is maximal. In essence,
we follow a procedure identical to that of
\citet{ho06} and \citet{ho10} utilized on the BATSE and Swift datasets.
In order to sort the given GRBs into the groups we employ
the measurements of the durations and hardness ratios as given
in Table~7 of \citet{rip09} with the exception of six events.
We found that for these six events the mentioned values in \citet{rip09} were
not corrected for a so-called decimation, which is an instrumental mode used to
conserve the onboard memory.
Table~\ref{tab:dec-corr} presents these six events, now corrected for this decimation.
The group members used in this study were determined from the best Maximum
Likelihood (ML) fit \citep{rip09} in the duration-hardness plane of 427 GRBs. In this sample the
six events with corrected decimation were included along with the remaining
421 events taken from \citet{rip09}.
The best ML fit with
two bivariate lognormal components gives logarithmic likelihood $\mathrm{ln} L_2 = -313.4$.
Best fit with three components gives logarithmic likelihood $\mathrm{ln} L_3 = -303.4$.
The ML ratio test tells us that twice the difference of the logarithms of the likelihoods,
$2(\mathrm{ln} L_3 - \mathrm{ln} L_2) = 20.0$, should follow a $\chi^2$ distribution with 6
degrees of freedom \citep{ho06}. Therefore the ML ratio test, employed in \citet{rip09} and now applied to
the duration-hardness plane with these 427 GRBs including the six events corrected for decimation,
gives again a statistically significant intermediate group at the significance level of 0.3\,\%.
The new (former) best-fit model parameters of the intermediate group are: 0.12 (0.11) for the mean
logarithmic duration, 0.25 (0.27) for the mean logarithmic hardness, 4.1\,\% (5.3\,\%) for the weight,
and 0.0 (0.59) for the correlation coefficient. The group members are shown in Fig.~\ref{fig:groups}
and listed in Table~\ref{tab:database}.
\begin{deluxetable}{lccc}
\tabletypesize{\scriptsize}
\tablecaption{Six RHESSI GRBs with corrected $T_{90}$ durations and
hardness ratios.\label{tab:dec-corr}}
\tablewidth{0pt}
\tablehead{
\colhead{GRB\tablenotemark{a}} &
\colhead{Peak time\tablenotemark{b}} &
\colhead{$T_{90}$ (s)\tablenotemark{c}} &
\colhead{Hardness ratio log $H$\tablenotemark{d}}
}
\startdata
030518B & 03:12:23.050 & (1.86$\pm$0.07)E+1 & (2.90$\pm$0.27)E-1 \\
030519A & 09:32:22.500 & (3.20$\pm$0.27)E+0 & (5.31$\pm$0.61)E-1 \\
031024 & 09:24:14.350 & (4.30$\pm$0.17)E+0 & -(2.06$\pm$0.31)E-1 \\
040220 & 00:55:15.800 & (1.80$\pm$0.07)E+1 & (9.39$\pm$2.72)E-2 \\
050216 & 07:26:34.275 & (4.50$\pm$0.56)E-1 & (2.33$\pm$0.48)E-1 \\
050530 & 04:44:44.900 & (2.40$\pm$0.26)E+0 & (2.41$\pm$0.63)E-1 \\
\enddata
\tablenotetext{a}{RHESSI GRB number.}
\tablenotetext{b}{Peak time of the count light-curve in UTC.}
\tablenotetext{c}{The uncertainties were calculated
though the same procedure used in \citet{rip09}.}
\tablenotetext{d}{The hardness ratio was defined as the ratio of GRB counts
at two different bands, $H=S_{(120-1500)\mathrm{keV}}/S_{(25-120)\mathrm{keV}}$.}
\end{deluxetable}
\begin{figure}[h]
\centering
\includegraphics[trim=6mm 2mm 5mm 6mm,clip=true,width=0.6\columnwidth]{groups.eps}
\caption{The hardness ratio $H$ plotted against the duration $T_{90}$
for the RHESSI database with the best ML fit of three bivariate log-normal functions.
The different GRB group members are denoted with different symbols:
the crosses, full circles, and triangles correspond, respectively, to the short, intermediate,
and long bursts. CL means ``confidence level".}
\label{fig:groups}
\end{figure}
The spectral lags $L$ of the RHESSI data were calculated by ourselves
by fitting the peak of the cross-correlation function (CCF) of the
background-subtracted count light-curves at two channels, $400-1500$\,keV and $25-120$\,keV, by
a third order polynomial. The position of the maximum of the polynomial fit measures
the spectral lag. An example of such a fit is shown in Fig.~\ref{fig:ccf}.
The method is similar to that employed in the previous studies of
\citet{nor00,nor02,fol08} and \citet{fol09}
on the BATSE and INTEGRAL data. This is the first time that the spectral
lags have been calculated for the RHESSI GRBs.
\begin{figure}[h]
\centering
\includegraphics[trim=6mm 3mm 4mm 6mm,clip=true,width=0.8\textwidth]{ccf.eps}
\caption{\emph{Left}: An example of the cross-correlation function of two background-subtracted count
light-curves of the very bright GRB~060306 derived at two energy bands $400-1500$\,keV and $25-120$\,keV.
\emph{Right}: A detail of the same curve with the third order polynomial fit (thick solid curve).
The position of
the maximum of the fit measures the spectral lag (dotted line).
The boundaries of the polynomial fit are marked with dashed lines.}
\label{fig:ccf}
\end{figure}
To obtain statistical errors, a Monte Carlo (MC) method was utilized.
The following procedure was employed to prepare 1001 synthetic count profiles
for each GRB:
The measured count profiles were randomly influenced by Poisson noise,
after which the background was subtracted.
The RHESSI count rates are sometimes ``decimated'', which means that, as the rate becomes
too high or the onboard solid-state recorder becomes too full,
a part of the recorded counts is removed.
If decimation occurs, the fraction $(f_\mathrm{d}-1)/f_\mathrm{d}$
of the counts below a decimation energy $E_0$ is removed. $f_\mathrm{d}$ is the
decimation factor (weight), usually equal to 4 or 6. All events above $E_0$ are
downlinked\footnote{\url{http://sprg.ssl.berkeley.edu/~dsmith/hessi/decimationrecord.html}}.
To prepare the synthetic count profiles, the number of counts in each bin was changed accordingly
to the Poisson distribution. The 1-sigma errors for
non-decimated, fully decimated, and partially decimated data are
$\sqrt{C}$, $\sqrt{f_\mathrm{d} \cdot C_\mathrm{dc}}$,
and $\sqrt{C_1+f_\mathrm{d} \cdot C_\mathrm{2,dc}}$, respectively.
$C$ is the measured count number in a bin for non-decimated data.
$C_\mathrm{dc}$ is the count number in a bin of fully decimated data and consequently
corrected for this decimation.
$C_1$ is the count number in the non-decimated portion and $C_\mathrm{2,dc}$ is the corrected count
number in the decimated portion of the measured rate in the case of partially decimated data.
A detailed explanation is provided in Appendix~\ref{app:1}.
The CCF was fitted for each of the 1001 synthetic profiles and for each burst in our sample.
The median of such a distribution of 1001 maxima of polynomial fits
was taken as the true lag $L$ for each burst.
These median lags $L$ are used in the following statistical tests and listed in Table~\ref{tab:database}.
The 2.5\,\% and 97.5\,\% quantiles of such a distribution of 1001 maxima of polynomial fits for each GRB
delimit the 95\,\% CL statistical errors. These errors are also listed in Table~\ref{tab:database}.
We decided to calculate the spectral lags only for bursts with a signal-to-noise ratio
higher than 3.5 in both channels. This signal-to-noise ratio is
defined as $S_{\mathrm T90}/\sqrt{S_{\mathrm T90}+2B_{\mathrm T90}}$, where $S_{\mathrm T90}$ is a
GRB signal over the background level $B_{\mathrm T90}$, and both $S$ and $B$
are counts in a $T_{90}$ time interval over the range 25\,keV$-$1.5\,MeV. The choice of this limit was
found to ensure that the CCF was sufficiently smooth with a clear peak
allowing determination of a reliable lag.
Therefore, excluding the noisiest data, the number of GRBs with calculated lags is 142.
Their distribution is presented in Fig.~\ref{fig:lags}.
The GRB peak-count number $S$ was derived from the light-curve with the maximal count number $C$ at
the range 25\,keV$-$1.5\,MeV after subtracting the background $B$. The peak-count rate $F$ is given as
the peak-count number $S$ divided by the width of the time bin $\delta t_{\mathrm res}$. This width was
different for different GRBs, and covered a range between 2\,ms and 3\,s.
The dimension of the peak-count rate is count/s.
The one sigma error $\sigma_{\mathrm F}$ of the peak-count rate $F$ was calculated as
$\sigma_{\mathrm F}=\sigma_{\mathrm S}/\delta t_{\mathrm res}$, where the error $\sigma_{\mathrm S}$ of
the GRB peak-count number is $\sigma_{\mathrm S}=\sqrt{(\sigma_{\mathrm C})^2+(\sigma_{\mathrm B})^2}$.
We assume that errors of the maximal count numbers $\sigma_{\mathrm C}$ and of the background
$\sigma_{\mathrm B}=\sqrt{B}$ are Poissonian and independent. The error $\sigma_{\mathrm C}$ is:
$\sigma_{\mathrm C}=\sqrt C$ in case of non-decimated data; given by expression (\ref{eq:full-dec})
in case of fully decimated data; and given by expression (\ref{eq:part-dec}) in case of partially
decimated data (see Appendix~\ref{app:1}). The peak-counts with errors were calculated for all 427 objects.
\section{Properties of the GRB groups}
\label{sec:KS}
\subsection{Distribution of spectral lags}
In this section we use the Anderson-Darling (A-D) test \citep{and52,dar57}
to compare distributions of spectral lags of different GRB groups (see Fig.~\ref{fig:lags})
found by the ML method applied on durations and hardness ratios (see Sec.~\ref{sec:sample}).
The short (intermediate, long) group contains 26 (11, 105) objects. The mean values of
the spectral lags of these groups are similar, hence we use the A-D test because it is
particularly sensitive to the tails of the tested distributions \citep{sch87}.
For its calculation we employ the \emph{adk} package of the R software\footnote{\url{http://cran.r-project.org}} \citep{R}.
The results are summarized in Tab.~\ref{tab:lags}.
\begin{figure}[h]
\centering
\includegraphics[width=1.0\textwidth]{lags.eps}
\caption{\emph{Left panels:}
The spectral lags of RHESSI GRBs sorted along the $y$-axis with respect
to the value of (+error + $|$-error$|$) for short-, intermediate- and
long-duration bursts. The median lags, for each GRB, were taken from the
lags of 1001 synthetic background-subtracted count time profiles obtained
by Monte Carlo simulations of the measured profiles that were randomly
influenced by the Poissonian noise. The error bars are composed of the
95\,\%~CL statistical error and the profile time resolution. A positive
lag means that the low-energy counts are delayed. \emph{Right panels:}
The cumulative distributions of the obtained median lags for the three
groups of bursts are shown.
}
\label{fig:lags}
\end{figure}
\begin{table}[h]
\centering
$
\begin{array}{cccc}
\begin{tabular}{ccc}
\hline\hline
Groups & A-D $P$ \\
& (\%) \\
\hline
Inter.-Short & 16.8 \\
Inter.-Long & 4.2 \\
Short-Long & $<10^{-3}$ \\
\hline
\end{tabular}
\begin{tabular*}{1.0cm}{c}
\\
\end{tabular*}
\begin{tabular}{cccc}
\hline\hline
Group & Mean $L$ & Median $L$ & $\sigma$ \\
& (ms) & (ms) & (ms) \\
\hline
Short & 4.9 & 1.9 & 16.7 \\
Inter. & 28.7 & 5.9 & 78.4 \\
Long & 178.0 & 50.8 & 874.9 \\
\hline
\end{tabular}
\end{array}$
\caption{
\emph{Left part:}
Results from the A-D tests of the spectral lag distributions for
the RHESSI database, are presented. The null hypothesis is that the two samples
are drawn from the same distribution. $P$ denotes the P-value of the test.
\emph{Right part:}
The means, medians and standard deviations $\sigma$ of the lags are listed.}
\label{tab:lags}
\end{table}
The A-D test gives a significance of 16.8\,\% (the probability that the
two samples are drawn from the same distribution) for the short-intermediate
pair, and it yields a significance of 4.2\,\% for the long-intermediate pair.
Therefore, in case of short and intermediate groups, we cannot reject the null
hypothesis that the two samples are drawn from the same distribution on a sufficiently
low level (5\,\%). On the other hand, this null hypothesis can be rejected in the case
of long and intermediate groups, but the significance is not far below the 5\,\% level.
The same test applied on the lags of the short-long pair yields a significance of
$<10^{-3}$\,\%. Therefore, in this case, the null hypothesis can be rejected
with a high significance. This strongly supports the well-known claim that the short and
long GRBs are really different phenomena and confirms the results of \citet{nor01}
(obtained with BATSE), but now by using the RHESSI instrument.
\subsection{Distribution of normalized lags}
In this section we compare the distributions of normalized lags (Fig.~\ref{fig:normlags}),
i.e. $L/T_{90}$, next to the absolute values of the lags. Again we use the A-D test between
the different GRB groups mentioned in the previous section. The number of events within the
groups is therefore the same. The results are summarized in Tab.~\ref{tab:normlags}.
The A-D test gives the significance level of 54.2\,\%
for the short-intermediate pair and it gives the significance
level of 45.0\,\% for the long-intermediate pair. The significances are considerably above 5\,\% level,
therefore the null hypothesis that the samples are drawn from the same distribution cannot be rejected.
For the short-long pair, the A-D test gives the significance level of 6.0\,\%.
If the normalized lags are concerned, the difference between the short and long bursts is not definite.
\begin{figure}[h]
\centering
\includegraphics[trim=8mm 3mm 3mm 7mm,clip=true,width=0.48\textwidth]{cumulative_distr_normlags.eps}
\caption{The cumulative distributions of the normalized lags for the three RHESSI
GRB groups.}
\label{fig:normlags}
\end{figure}
\begin{table}[h]
\centering
$
\begin{array}{cccc}
\begin{tabular}{ccc}
\hline\hline
Groups & A-D $P$ \\
& (\%) \\
\hline
Inter.-Short & 54.2 \\
Inter.-Long & 45.0 \\
Short-Long & 6.0 \\
\hline
\end{tabular}
\begin{tabular*}{1.0cm}{c}
\\
\end{tabular*}
\begin{tabular}{cccc}
\hline\hline
Group & Mean & Median & $\sigma$ \\
& $L$(ms)/$T_{90}$(s) & $L$(ms)/$T_{90}$(s) & \\
\hline
Short & 21.3 & 15.8 & 63.3 \\
Inter. & 17.6 & 5.5 & 63.0 \\
Long & 10.2 & 3.4 & 32.0 \\
\hline
\end{tabular}
\end{array}$
\caption{
\emph{Left part:}
Results of the A-D tests of the equality of the normalized lag
distributions between different RHESSI GRB groups are listed.
$P$ denotes the P-value of the test.
\emph{Right part:}
The means, medians and standard deviations $\sigma$ of the normalized lags are also mentioned.}
\label{tab:normlags}
\end{table}
\subsection{Distribution of peak-counts}
Here we used the Kolmogorov-Smirnov (K-S) test \citep{kol33,smir48} to compare the
cumulative distributions of the peak-counts among the different GRB groups.
The short (intermediate, long) group contains 42 (18, 367)
objects. The results are presented in Tab.~\ref{tab:peak_count_rates} and shown in
Fig.~\ref{fig:peak_count_rates}.
\begin{figure}[h]
\centering
$
\begin{array}{cc}
\includegraphics[trim=0mm 2mm 1mm 2mm,clip=true,width=0.48\textwidth]{peak_fluxes_T90.eps}
&
\includegraphics[trim=0mm 2mm 1mm 2mm,clip=true,width=0.48\textwidth]{cumulative_distr_peak_fluxes.eps}
\end{array}$
\caption{
\emph{Left panel:}
Peak-count rates $F$ of RHESSI GRBs as a function of $T_{90}$ durations for the three GRB groups,
identified by the analysis of the hardnesses and durations, are displayed.
\emph{Right panel:}
Cumulative distributions of these peak-count rates $F$ for the short-, intermediate-,
and long-duration bursts are shown.
}
\label{fig:peak_count_rates}
\end{figure}
\begin{table}[h]
\centering
$
\begin{array}{cccc}
\begin{tabular}{ccc}
\hline\hline
Groups & $D$ & K-S $P$ \\
& & (\%) \\
\hline
Inter.-Short & 0.44 & 0.9 \\
Inter.-Long & 0.55 & 3$\times$10$^{-5}$ \\
Short-Long & 0.69 & $<10^{-6}$ \\
\hline
\end{tabular}
\begin{tabular*}{1.0cm}{c}
\\
\end{tabular*}
\begin{tabular}{cccc}
\hline\hline
Group & Mean & Median & $\sigma$ \\
& $F$\,(s$^{-1}$) & $F$\,(s$^{-1}$) & (s$^{-1}$) \\
\hline
Short & 9\,485 & 5\,163 & 20\,418 \\
Inter.& 4\,412 & 2\,546 & 5\,586 \\
Long & 2\,589 & 1\,038 & 7\,673 \\
\hline
\end{tabular}
\end{array}$
\caption{
\emph{Left part:}
Results of the K-S test applied on the peak-count rates $F$ for the RHESSI database.
The K-S distance $D$ and the K-S significance $P$ are mentioned.
\emph{Right part:}
The means, medians, and standard deviations of the peak-count rates are listed.}
\label{tab:peak_count_rates}
\end{table}
The results of the K-S tests imply that the distributions of the
peak-count rates are different over all three groups. Particularly, the K-S significance
level for the intermediate vs. short bursts is 0.9\,\%, for intermediate vs.
long bursts it is 3$\times$10$^{-5}\,\%$,
and for short vs. long bursts it is $<10^{-6}\,\%$.
\subsection{Monte Carlo simulations}
\label{sec:mc-sim}
In order to test the robustness of the results obtained by the A-D tests applied on
lags $L$, normalized lags $L/T_{90}$ and K-S tests applied on peak-count rates $F$,
one can use Monte Carlo method.
In case of spectral lags we proceeded in the following way: The procedure described in
Sec.~\ref{sec:sample} - calculation of statistical errors of the lags by applying of Poisson noise -
provided distribution of 1\,001 lags for each GRB. Thus for each GRB we randomly selected
one lag from its distribution and made 10\,000 data samples.
Then the A-D tests for these 10\,000 samples were calculated.
In case of peak rates, we proceeded as below: We applied the Poisson noise to the
measured light curves and subtracted the background in order to obtain the simulated data.
Then we derived the peak count rate for the same peak time when the peak was found in the
measured light curves. We proceed in this way for each GRB. Afterwards we calculated K-S tests and
repeated this sequence 10\,000 times.
The number of cases when the A-D and K-S probability reached higher values than 5\,\% for tests
done on different pairs of GRB groups is noted in Tab.~\ref{tab:montecarlo}. The results of
MC simulations comparing spectral lags and normalized lags are shown in Fig.~\ref{fig:lags_MC}.
The MC method confirms that the distributions of spectral lags between short
and long GRB groups are different. Let us compare results from the MC simulations of lags
and normalized lags between the intermediate-short and intermediate-long pairs
with the results of the tests applied directly on median lags (Tab.~\ref{tab:lags})
and median normalized lags (Tab.~\ref{tab:normlags}). Then one can see that the MC simulations
give A-D prob. $>$ 5\,\% more often than expected. This can be caused by the fact that for some
GRBs, weak and noisy ones, the distribution of lags found by MC method might not
follow the real distribution because after applying the Poisson noise the polynomial fit of
CCF may not well describe the CCF peak. The reason for this conjecture is that the fitting
range remained fixed and same for the simulated data as for the measured data. In other words
the suitable fitting range for the measured data need not be suitable for the simulated data.
In this case we think that the A-D tests applied on the median lags give more reliable results
than the MC simulations do. However, one mutual behaviour is seen here, the intermediate-short
pair has distributions of lags and normalized lags more similar than the pair intermediate-long does.
This feature is seen both in the A-D tests applied on the median lags/normalized
lags and in the A-D tests of the MC data samples.
MC simulations also confirm results of K-S tests applied
directly on the measured peak rates. We can conclude that the short-,
intermediate- and long-duration bursts
have different distributions of peak count rates.
The results of MC simulations comparing peak-count rates
are shown in Fig.~\ref{fig:F_MC}.
\begin{deluxetable}{cccc}
\tablecaption{Monte Carlo double-check of results from the statistical tests.
\label{tab:montecarlo}}
\tablewidth{0pt}
\tablehead{
\colhead{Tests} &
\colhead{Inter.-Short} &
\colhead{Inter.-Long} &
\colhead{Short-Long}
}
\startdata
Lags & $8\,556$ ($85.6\,\%$) & $6\,938$ ($69.4\,\%$) & \phn\phn\phd$0$ ($0.0\,\%$) \\
Norm. lags & $9\,936$ ($99.4\,\%$) & $8\,862$ ($88.6\,\%$) & $1\,458$ ($14.6\,\%$) \\
Peak rates & \phn\phd$47$ ($0.5\,\%$) & \phn\phn\phn$0$ ($0.0\,\%$) & \phn\phn\phd$0$ ($0.0\,\%$) \\
\enddata
\tablecomments{The number of cases out of 10\,000 MC cycles (and their percentages)
are noted for A-D (lags and norm. lags) and K-S (peak rates) probability values exceeding 5\,\%
for tests done on spectral lags, normalized lags, peak-count rates, and on different pairs of GRB groups.}
\end{deluxetable}
\begin{figure}[h]
\centering
$
\begin{array}{cc}
\includegraphics[trim=9mm 3mm 1mm 6mm,clip=true,width=0.48\textwidth]{lags_AD_MC.eps}
&
\includegraphics[trim=9mm 3mm 1mm 6mm,clip=true,width=0.48\textwidth]{normlags_AD_MC.eps}
\end{array}$
\caption{
The A-D probabilities of the tests applied on the samples of lags (left panel) and normalized lags
(right panel) obtained from 10\,000 MC cycles for different GRB groups.
The horizontal solid line denotes the 5\,\% threshold.
}
\label{fig:lags_MC}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[trim=7mm 3mm 1mm 6mm,clip=true,width=0.48\textwidth]{peak_fluxes_K-S_MC_2.eps}
\caption{The K-S probabilities of the tests applied on the samples of peak-count rates obtained from
10\,000 MC cycles for different GRB groups.}
\label{fig:F_MC}
\end{figure}
\section{Discussion}
\label{sec:dis}
\subsection{Comparison with the BATSE database}
The lags of GRBs from the BATSE dataset are different for the short and long groups
\citep{nor01}: for the short bursts the lags on average are close to zero, but for
the long bursts they are positive. Norris and his collaborators did not study the
lags of the intermediate bursts separately \citep{nor01,nor02,nobo06}. For the sake
of completeness we have attempted to do this for the publicly
available data. \citet{ho06} defines membership within the groups for all BATSE GRBs.
Additionally, \citet{nor02} defines for any GRB with $T_{90} > 2\,$s its
lag\footnote{\url{http://heasarc.gsfc.nasa.gov/docs/cgro/analysis/lags/web_lags.html}}.
Compilation of these two lists and the application of the A-D test on the lags of
the three BATSE groups (here for the first time)
produced the results collected in Tab.~\ref{tab:BATSE_lags} and shown in
Fig.~\ref{fig:BATSE_lags}. The short (intermediate, long) group contains 33 (119, 1179)
objects here. Of course, one must keep in mind that this sample is drastically truncated
for the short bursts. Hence, the short-intermediate and the short-long comparisons
can serve only as qualitative indicators. Even the intermediate-long pair cannot
be taken as representative because the truncation $T_{90} > 2\,$s can also omit
several intermediate GRBs.
\begin{figure}[h]
\centering
$
\begin{array}{cc}
\includegraphics[trim=8mm 2mm 2mm
6mm,clip=true,width=0.48\textwidth]{cumulative_distr_lags_BATSE_CB.eps}
&
\includegraphics[trim=8mm 2mm 1mm 5mm,clip=true,width=0.48\textwidth]
{cumulative_distr_peakfluxes_BATSE_CB.eps}
\end{array}$
\caption{Cumulative distributions of the spectral lags (left panel)
and peak-fluxes (right panel) for the three BATSE GRB groups are shown.}
\label{fig:BATSE_lags}
\end{figure}
\begin{table}[h]
\centering
$
\begin{array}{cccc}
\begin{tabular}{ccc}
\hline\hline
Groups & A-D $P$ \\
& (\%) \\
\hline
inter.-short & 51.3 \\
inter.-long & 3.8 \\
short-long & 9.7 \\
\hline
\end{tabular}
\begin{tabular*}{1.0cm}{c}
\\
\end{tabular*}
\begin{tabular}{cccc}
\hline\hline
Group & Mean $L$ & Median $L$ & $\sigma$ \\
& (ms) & (ms) & (ms) \\
\hline
short & 177.1 & 72.0 & 454.6 \\
inter. & 207.5 & 60.0 & 464.2 \\
long & 390.7 & 94.0 & 848.2 \\
\hline
\end{tabular}
\end{array}$
\caption{
\emph{Left part:}
Results from the A-D test of the equality of the spectral
lag distributions for the BATSE GRBs. $P$ denotes the P-value of the test.
\emph{Right part:}
The mean, median and standard deviations $\sigma$ of the lags.}
\label{tab:BATSE_lags}
\end{table}
Keeping all this in mind, if the lags are taken into consideration,
we can say that there is some similarity between the BATSE and the
RHESSI databases. First of all, there is a similarity with regard to
the intermediate-long pair: the difference is confirmed,
though not at a high significance level, but remarkably the significances
from the A-D test are comparable (3.8\,\% and 4.2\,\%).
Second, there is a similarity with regard to the intermediate-short pair:
in both databases the A-D test reveals that for these two groups the distributions
of GRB lags are similar; the significances are 51.3\,\% for BATSE,
and 16.8\,\% for RHESSI. However, one must again keep in mind that our BATSE sample of
short bursts is truncated and the sample of intermediate bursts as well.
Third, both databases show a difference between the average lags for the short-long
pairs (for the BATSE databases the difference between the distributions
is shown not to be significant; the A-D P-value is only 9.7\,\%,
probably as a result of the sample truncation, but \citet{nor01} makes this claim unambiguously).
The results of the K-S tests applied on the peak-fluxes of the 64\,ms resolution light-curves
for the BATSE data imply that the distributions are different over all three groups.
The short (intermediate, long) group contains 502 (169, 1282) objects here, and the K-S tests are
summed in Tab.~\ref{tab:BATSE_peak_fluxes} and shown in Fig.~\ref{fig:BATSE_lags}.
These results are to be expected because, for example, \citet{na07} claims that the peak-fluxes
of short GRBs are roughly $20\times$ smaller than those of the long ones. It is also
known that the intermediate BATSE group is ``intermediate" concerning the fluence \citep{mu98}.
\begin{table}[h]
\centering
$
\begin{array}{cccc}
\begin{tabular}{ccc}
\hline\hline
Groups & $D$ & K-S $P$ \\
& & (\%) \\
\hline
inter.-short & 0.30 & $<10^{-6}$ \\
inter.-long & 0.13 & 1.0 \\
short-long & 0.21 & $<10^{-6}$ \\
\hline
\end{tabular}
\begin{tabular*}{1.0cm}{c}
\\
\end{tabular*}
\begin{tabular}{cccc}
\hline\hline
Group & Mean & Median & $\sigma$ \\
& $F$ & $F$ & \\
\hline
short & 4.00 & 2.15 & 6.80 \\
inter.& 3.15 & 1.29 & 4.91 \\
long & 4.09 & 1.51 & 10.31 \\
\hline
\end{tabular}
\end{array}$
\caption{
\emph{Left part:}
Results of the K-S test applied on the peak-fluxes $F$\,(ph.cm$^{-2}$.s$^{-1}$) of the
BATSE GRBs. The shortcuts have the same meaning as in Tab.~\ref{tab:peak_count_rates}.
\emph{Right part:}
The means, medians, and standard deviations of the peak-fluxes are listed.}
\label{tab:BATSE_peak_fluxes}
\end{table}
Therefore, our comparison of these RHESSI and BATSE groups finds similarities.
In the case of BATSE database, all three groups are different
in respect to two quantities (duration and peak-flux).
It is remarkable that for BATSE the hardness of intermediate group
is strongly anticorrelated with the duration \citep{ho06}. Since the hardness of the
intermediate group differs from the hardnesses of the short and long ones, these studies
support the opinion that all three BATSE groups represent different phenomena.
\subsection{Comparison with the Swift database}
The lags of the GRBs from the Swift dataset are also different for the short and long groups
\citep{ugar11}. \citet{ugar11} also discuss the lags of the intermediate bursts,
and they find a behavior which does not resemble the cases found in
the RHESSI and BATSE datasets.
The Swift's intermediate-long pair has on average similar lags, but there is a statistically
significant difference in the short-intermediate pair. Thus, if the lags are considered,
the Swift's intermediate group is similar to the Swift's long group \citep{ugar11}.
On the other hand, the peak-fluxes differ significantly in the short-intermediate and intermediate-long pairs,
respectively. The peak-fluxes of the short-long pair are not different from the statistical point of
view \citep{ve10}. Nevertheless, \citet{ugar11} concludes that ``Swift's intermediate
bursts differ from short bursts, but exhibit no significant differences from long bursts apart from
their lower brightness". In other words, in the Swift database there is a clear similarity between
the intermediate group and the long one. The physical difference of the short
and long bursts in the Swift database further holds \citep{ve10,ugar11}.
Comparison with the Swift's groups leads to the conclusion that
the third group in the Swift database is strongly related to the long group, as stated by
\citet{ve10} and \citet{ugar11}, and only the short group should represent another phenomenon.
There is a difference in the hardness, peak-flux, and duration for the intermediate-long pair
\citep{ho08,ve10}, but no clear separation occurs for the lags \citep{ugar11}.
We have no reason to query the conclusions of \citet{ve10}
that the intermediate group is related to XRFs which in turn can be
related to standard long GRBs. We add that the separation within
the long group itself into harder and softer parts is not fully new \citep{pe97,tav98}.
We venture to claim that the intermediate-duration bursts in the RHESSI and Swift
databases are different phenomena. These results followed exclusively from the statistical analyses.
\subsection{Discussion of the number of groups}
\label{sec:H-T90}
In order to provide an extended discussion on the number of GRB groups
we apply clustering methods to our data sample.
This also serves to extend the statistical analysis performed by \citet{rip09}.
In general, the clustering methods can be divided into
parametric and non-parametric types. Parametric methods assume that the data follow a pre-defined model
(in our case a sum of multivariate Gaussian functions). These methods assign for each GRB a
probability of membership in a certain group. The non-parametric methods, e.g. K-means clustering,
provide definite assignments of each burst to a given group.
More details about these methods can be found in the book by \citet{eve11}.
Model-based clustering is also described in \citet{mcla00}.
We apply model-based clustering and K-means clustering methods on
our RHESSI data sample by using the algorithms
implemented in the R software.
\subsubsection{Model-based clustering method}
\label{sec:model-based-clust}
In this method we assume that the distribution of the tested parameters
(logarithms of durations, hardness ratios, peak-count rates, and normalized lags)
follow a superposition of Gaussian functions.
Similar analysis for GRB classification was done by \citet{mu98,ho06} and \citet{ve10}.
The Maximum Likelihood method is used to find the best-fitted model parameters.
Adding more free parameters to a fitted model can increase the likelihood, but also may result in
overfitting. It is possible to penalize a model for more free parameters.
This can be done by a method called the Bayesian Information Criterion
(BIC) presented by \citet{schw78}. The function which must be maximized to get the
best-fitted model parameters is:
$\mathrm{BIC} = 2\ln l_\mathrm{max} - m\ln N$, where $l_\mathrm{max}$ is the maximum
likelihood of the model, $m$ is the number of free parameters, and
$N$ is the size of the sample. In our work we use the BIC to determine the most probable model,
its parameters and the number of its components.
For model-based clustering, we use \emph{Mclust}
package\footnote{\url{http://cran.r-project.org/web/packages/mclust/index.html}}
\citep{fra00} of R. For the explanation of different models, see the Mclust
manual\footnote{\url{http://www.stat.washington.edu/research/reports/2006/tr504.pdf}}.
The nomenclature of the different models in Mclust involves the following designations:
the volumes, the shapes and the orientation of the axes of all clusters may be equivalent (E) or may
vary (V) and the axes of all clusters may be restricted to parallel orientations
with the coordinate axes (I).
\subsubsection{Model-based clustering - 2 variables}
First, we start with a two-dimensional case and fit $T_{90}$ durations and hardnesses $H$.
The data sample consists of 427 bursts (Table~7 of \citet{rip09} and Table~\ref{tab:dec-corr}).
In this case the number of free parameters of the model with $k$ bivariate
Gaussian components is $6k - 1$ ($2k$ means, $2k$ standard deviations, $k$ correlation coefficients,
and $k-1$ weights, because the sum of the weights is 1). For the most general model, all parameters
are free. However, sometimes we want to test models where some of the parameters
between different components are in a relation with other parameters, e.g. all components
have the same weight or shape, etc. In this case the number of degrees of freedom
is reduced.
As seen in Fig.~\ref{fig:BIC-T90H} the best fitted model has $k=2$ components with equal volumes,
variable shapes, and with the axes of all clusters parallel with the coordinate axes (EVI model).
This best fitted model has a value of $\mathrm{BIC} = -681.5$. The EVI model with one component
gives $\mathrm{BIC} = -899.1$ and with three components $\mathrm{BIC} = -701.8$.
For all other tested models with $k=1$ component the highest BIC is -820.3 and
with $k=3$ components -694.3, which are clearly below the maximum.
The difference between the BIC of two models gives us information
about the goodness. According to \citet{kas95} and \citet{mu98}, a difference in BIC of
less than 2 represents weak evidence, difference between 2 and 6 represents positive evidence, between
6 and 10 strong evidence, and difference greater than 10 represents very strong evidence in favor of
the model with the higher BIC.
In our case the difference between the best fitted model (EVI) with two components and
the EVI models with one or three components is always higher than 10.
This gives a strong support for the EVI model with $k=2$ components.
The two components are short/hard and long/soft groups. The intermediate-duration bursts
shown in Fig.~\ref{fig:groups} are assigned to the short/hard group by this test.
\subsubsection{Model-based clustering - 3 variables}
\label{sec:H-T90-F}
Next we perform model-based clustering on three variables:
$T_{90}$ durations, hardnesses $H$, and peak-count rates $F$.
Since the peak rates were measured for all events,
the sample here also consists of all 427 bursts (Table~\ref{tab:database}).
The best fitted model has $k=3$ components (see Fig.~\ref{fig:BIC-T90H})
with equal volumes, equal shapes and equal correlation coefficients between all clusters (EEE model).
This best model has a value of $\mathrm{BIC} = -1156.6$.
The EEE model with two components gives $\mathrm{BIC} = -1168.7$ and
for four components $\mathrm{BIC} = -1174.6$.
Markedly high values of BIC are also obtained for the EEI, VEI, and VVI models
with $k=3$ components, $\mathrm{BIC} = -1166.2$, $\mathrm{BIC} = -1162.5$, and
$\mathrm{BIC} = -1166.1$, respectively.
The difference in BIC between the EEE model with three components and
the EEE models with two or four components is $>10$.
The other models with other numbers of components (except the above-mentioned EEI, VEI,
and VVI models with three components) give BIC values lower by at least 10.
This provides strong evidence in favor of the EEE
model with $k=3$ components. The group structure of this model with three
components is shown in Fig.~\ref{fig:BIC-FT90H-scattplot}.
The intermediate-duration bursts shown in Fig.~\ref{fig:groups} are assigned to the
short/hard group by this test. A new result here is that the group of long bursts is separated
into high- and low-peak flux clusters.
\begin{figure}[h]
\centering
$
\begin{array}{cc}
\includegraphics[trim=31mm 2mm 9mm 16mm,clip=true,height=0.47\textwidth,angle=-90]{bic-2par-models.eps}
&
\includegraphics[trim=31mm 2mm 9mm 16mm,clip=true,height=0.47\textwidth,angle=-90]{bic-3par-models.eps}
\end{array}$
\caption{
\emph{Left panel:}
Bayesian information criterion (BIC) values for different models as a function of the number
of bivariate Gaussian components. The higher the BIC value, the more probable the model.
The most probable model is EVI with two components. The data sample consists of two variables:
$T_{90}$ durations and hardness ratios.
\emph{Right panel:}
BIC values for different models plotted against the number of components.
The most probable model is EEE with three components.
The data sample consists of three variables: $T_{90}$ durations,
hardness ratios $H$, and peak-count rates $F$.}
\label{fig:BIC-T90H}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[trim=9mm 9mm 9mm 9mm,clip=true,height=0.9\textwidth,angle=-90]{bic-3par-scatterplot2.eps}
\caption{A scatter plot of 427 bursts, with measured $T_{90}$, $H$, and $F$ assigned into three
groups by the EEE model.}
\label{fig:BIC-FT90H-scattplot}
\end{figure}
\subsubsection{Model-based clustering - 4 variables}
\label{sec:H-T90-F-L/T90}
Spectral lags of BATSE GRBs, i.e., the time delays between low- and high-energy photons,
have been found to differ between the short and long groups. For short bursts, the average
lag is $\sim 20$--$40$ times shorter than for long bursts, and the lag distribution is close to
symmetric about zero---unlike that of long bursts \citep{nor01,nor02,nobo06}. This result gave us
the idea to incorporate the spectral lags as well.
In this part we apply the model-based clustering on GRB peak-count rates $F$,
$T_{90}$ durations, hardness ratios $H$, and as a new addition to the variables, on normalized
lags $L/T_{90}$. Since the RHESSI spectral lags were calculated only for 142 bursts
(Table~\ref{tab:database}), our sample is truncated.
The best fitted model has $k=2$ components and is unconstrained, i.e. it has variable volumes,
variable shapes and variable correlation coefficients (VVV).
The best BIC value for this model is $\mathrm{BIC} = -1768.4$.
However, the VVV model with $k=3$ components gives a similar value of $\mathrm{BIC} = -1768.5$.
The other models give BIC values lower by at least 10. This strongly supports the VVV model with
$k=2$ components. There is no need to introduce the VVV model with three components, which has more free parameters.
The two components are separated according to the values of the normalized lags into zero- and non-zero-lag events.
\subsubsection{Summary of model-based clustering}
The model-based clustering of the two-parameter data ($T_{90}$ and $H$) gives strong evidence
in favor of the EVI model with two components. The analysis of the three-parameter data ($T_{90}$, $H$ and $F$)
shows that the best fitted model is EEE with three components. Surprisingly, a new result is obtained here:
the group of long bursts is separated into high- and low-peak flux clusters.
The analysis of the four-parameter data ($T_{90}$, $H$, $F$ and $L/T_{90}$) supports
the VVV model with two components only. The separation into the two components here is according
to the values of the normalized lags into zero- and non-zero-lag events.
\begin{deluxetable}{ccccccccc}
\tabletypesize{\footnotesize}
\tablecaption{A summary of the results from the model-based clustering.
\label{tab:discussion-tests}}
\tablewidth{0pt}
\tablehead{
\colhead{} &
\colhead{Model} &
\colhead{$k$} &
\colhead{$\mathrm{BIC}$} &
\colhead{$\mathrm{\Delta BIC}$} &
\colhead{$\mathrm{\Delta BIC}$} &
\colhead{$\mathrm{\Delta BIC}$} &
\colhead{$\mathrm{\Delta BIC}$} &
\colhead{Evidence} \\
\colhead{}&\colhead{}&\colhead{}&\colhead{}&\colhead{k=1}&\colhead{k=2}&\colhead{k=3}&\colhead{k=4}&\colhead{}
}
\startdata
2 par. & EVI & 2 & -681.5 & $>10$ & $\times$ & $>10$ & & very strong \\[0.5ex]
\hline\\[-2ex]
3 par. & EEE & 3 & -1156.6 & &$>10$ & $\times$ & $>10$ & very strong \\[0.5ex]
\hline\\[-2ex]
4 par. & VVV & 2 & -1768.4 & $>10$ & $\times$ & $>10$ & & very strong \\[0.5ex]
\enddata
\tablecomments{The results for the model-based clustering applied on 2, 3, and 4 parameters are presented.
The values of BIC for the best fitted models with $k$ components are listed, as well as the
differences to the models with other numbers of components.}
\end{deluxetable}
\subsubsection{K-means clustering}
\label{K-means-HT90}
One of the non-parametric clustering methods is K-means \citep{mac67}.
Before we use our data for this method we scale them, i.e. we subtract
the mean value and then divide them by the standard deviation.
The reason for this procedure is that the clustering algorithm is sensitive to the distance
scale of the variables. For more details about the application of the K-means
method in a similar analysis of GRB data, see, e.g. \citet{cha07} or \citet{ve10}.
For this clustering method we use \emph{kmeans} package implemented in the R software.
To use the K-means method, one must set the number of clusters beforehand.
Then the corresponding number of centers is found by minimizing the sum of
squared distances from each burst to the center of the group to which they belong.
There is no precise way to determine the best number of clusters
with this method. However, it has been suggested that if one plots the within-group
sum of squares (WSS) as a function of the number of clusters, then an ``elbow'' will indicate
the best number \citep{har75}. This method does not provide any probability indicating the
significance or insignificance of the given best number of clusters.
The calculated WSS as a function of the number of groups for our data samples using 2 ($T_{90}$, $H$),
3 ($T_{90}$, $H$, $F$), and 4 ($T_{90}$, $H$, $F$, $L/T_{90}$) variables are rather smooth and do not
demonstrate any remarkable and sharp ``elbows'' and thus do not bring useful information on the GRB classification.
\subsection{Discussion of the results}
The K-S tests applied on peak-count rates show that the distributions
are different over all three groups. The K-S significance level
for the short-long pair is $<10^{-6}$\,\%,
for the intermediate-long pair it is $3\times10^{-5}$\,\%,
and for the intermediate-short one it is 0.9\,\%.
The short and long GRBs have clearly different distributions of peak rates.
Also the intermediate and long GRBs have clearly different distributions of peak rates.
The intermediate-short pair also exhibits different distributions (K-S probability $<5$\,\%),
however less markedly than the other pairs of groups do.
These results are confirmed by MC simulations.
The A-D tests applied on distributions of spectral lags unveil that
the A-D probability for the short-long pair is $<10^{-3}$\,\%,
for the intermediate-long pair the A-D probability is 4.2\,\%,
and for the intermediate-short one it is 16.8\,\%.
The short and long GRBs have clearly different distributions of spectral lags.
The intermediate and long GRBs have A-D probability $<5$\,\%, however in this case the difference is not strong.
The intermediate-short pair does not exhibit different distributions.
The difference in the spectral lag distributions of the short-long pair of GRB groups is confirmed by MC simulations.
In the case of the intermediate-short and intermediate-long pairs, the MC simulations reveal the same tendency as the A-D tests
applied directly on the measured values, i.e. the intermediate-short pair has more similar
distributions of spectral lags than the intermediate-long pair has. However, the MC simulations give an A-D probability
higher than 5\,\% more often than expected. A possible reason is commented on in Sec.~\ref{sec:mc-sim}.
The A-D tests applied on distributions of normalized lags show that
these distributions cannot be claimed as different. The A-D probability for the short-long pair is 6.0\,\%,
for the intermediate-long pair it is 45.0\,\%, and for the intermediate-short one it is 54.2\,\%.
Here one can see the same tendency as in the case of A-D tests applied on spectral lags, i.e.
the short-long pair has the least similar distributions of lags, the intermediate-long couple stays in the middle,
and the intermediate-short pair has the most similar distributions.
This tendency appears also in the MC simulations; however, the cases when the A-D probability
exceeds the 5\,\% level happen more often than expected. The reason could be the same as in the case of the MC simulations
applied on the absolute values of the spectral lags.
The model-based clustering of the two-parameter data ($T_{90}$ and $H$) gives strong evidence
in favor of a model with two components only. The two components are the short/hard and long/soft groups.
The intermediate-duration bursts shown in Fig.~\ref{fig:groups} are assigned to the short/hard group by this test.
The analysis of three-parameter data ($T_{90}$, $H$ and $F$) shows that the best fitted model has three components.
A surprising point here is that this method separates the group of long bursts into high- and low-peak flux clusters.
The analysis of the four-parameter data ($T_{90}$, $H$, $F$ and $L/T_{90}$) supports a model with two components which
are separated according to the values of the normalized lags into zero- and non-zero-lag events.
Surveying the A-D and K-S tests of the RHESSI data, it should be emphasized
that the difference between the short and long bursts was again strongly confirmed.
This follows from the different distributions of the spectral lags
and from the different distributions of the peak-count rates; both results were confirmed by the MC method.
This is already an expected result, but---usefully---this result came from a new observational database.
According to Fig.~\ref{fig:groups} the intermediate-short pair of groups have similar hardness ratios.
Also according to the results of the A-D test of the spectral lags, the distributions of lags are not different
for the intermediate-short pair. However, the intermediate-duration and short-duration bursts are not completely the same,
because their peak-count rate distributions differ. On the other hand, the intermediate-long pair of groups differs
in hardness ratios, spectral lags, and peak-count rates.
Therefore, in our opinion, it is possible that the intermediate group detected by RHESSI
in Sec.~\ref{sec:sample} and by \citet{rip09} may be a longer tail of the standard short/hard bursts.
This can be supported also by the fact that the model-based clustering method applied on hardness ratios and durations
unveils only two clusters as the best solution, namely the classical short/hard and long/soft groups; the intermediate-duration
bursts are assigned to the short group.
The RHESSI intermediate and long groups seem to be different phenomena.
This difference is supported by the distribution of the peak-count rates and spectral lags.
The results show that the intermediate group is also ``intermediate'' with regard to its lags.
The intermediate group detected by Swift was found to be related to XRFs \citep{ve10},
and those may in turn belong to the standard long GRBs \citep{kip03}.
In the case of RHESSI, the longer and softer GRBs are
more difficult to be detected, because RHESSI's sensitivity declines rapidly below
$\approx50$\,keV and the weak and soft GRBs are not easily observable \citep{rip09}.
On the other hand, Swift is less sensitive in the photon-energy range $>150$\,keV.
But softer GRBs are readily detectable with this instrument.
Hence, in our opinion, an instrumental effect may be responsible for the fact that the two satellites
(Swift and RHESSI) detected different intermediate groups.
This means that---from the statistical point of view---different groups
can be found if one looks at different databases.
There are bursts observed with properties similar to the short bursts
(hardness, lag) except that their durations exceed 2\,s. For example, beyond \citet{ge06} and
\citet{kan11} mentioned already in the Introduction,
\citet{nobo06} claims that ``short bursts with extended emission
(SGRBEE) can have $T_{90} > 2$\,s''. Furthermore, also others \citep{deba11}
propose the astrophysical fragmentation of the short GRB group.
Concerning SGRBEE, we inspected the light-curves of all 18 RHESSI intermediate bursts,
but we found no softer extended emission coming after the main hard spike as is
typical for this kind of bursts. Figure~3 of \citet{per09} shows that the average $T_{90}$ duration
of the initial spike of an SGRBEE lies between the average durations of short and long bursts.
If RHESSI detects only the hard initial spike, and the softer extended emission is lost
in the noise, then the detected intermediate group might be polluted
by these objects.
Therefore, we also checked the light-curves of the RHESSI intermediate bursts as observed
by Konus-Wind \citep{apt95}, because it also has a good sensitivity below 50\,keV
(its range is $10-10\,000$\,keV). It has an overlap with the following RHESSI intermediate bursts:
GRB~020819A, GRB~030410, GRB~040329, GRB~050530, GRB~070802, GRB~070824, GRB~080408.
However, no extended emission was observed for these seven bursts by Konus-Wind.
This observation indicates the RHESSI intermediate GRBs
should not be dominantly polluted by SGRBEEs.
Furthermore, there are also additional indications that GRBs
which do not belong to the long+XRF pair category,
may originate from a broad range of astrophysical phenomena. For example, \citet{mu98} found four
subclasses in the BATSE database from the year 1998, but the fourth group was populated by a single
GRB. From the statistical point of view, such an object is an outlier of uncertain origin.
Likewise, similar situations exist concerning the objects GRB~060614 \citep{ge06} and
GRB~110328A \citep{cu11}. Any study of such a single unusual object is beyond
the scope of this article providing only statistical analyses.
\section{Conclusions}
\label{sec:sum}
The main results of this study can be summarized as follows:
\begin{itemize}
\item
The Maximum Likelihood test in the duration--hardness plane of 427 RHESSI GRBs,
taken from \citet{rip09} but now with six events corrected for decimation,
again exhibits a statistically significant third group, intermediate in duration.
This completes the work of \citet{rip09}, which used the durations and hardnesses only.
\item
The spectral lags and peak-count rates have been calculated for GRBs
observed by the RHESSI satellite for the first time.
The spectral lags were obtained for 142 objects, and the peak-counts for all 427 GRBs.
Hence, we constructed a new observational database for this satellite.
Then the three GRB subgroups were analyzed statistically
with respect to these new spectral lags and peak-count rates.
\item
The difference between short and long groups has been confirmed.
Usefully this result came from a new observational database.
\item
Kolmogorov-Smirnov and Anderson-Darling tests applied on spectral lags and peak-count rates
indicate that the intermediate group in the RHESSI database might be a
longer tail of the short group or at least has some common properties with this short group.
Contrary to this, the intermediate and the long groups are different.
\item
The group of RHESSI intermediate-duration GRBs is not dominantly populated by SGRBEEs.
\item
The intermediate-duration bursts found in the RHESSI and Swift databases
seem to be represented by different phenomena.
\end{itemize}
\begin{deluxetable}{lccrr}
\tabletypesize{\scriptsize}
\tablecaption{The spectral lags and peak-count rates of the RHESSI GRBs.\label{tab:database}}
\tablewidth{0pt}
\tablehead{
\colhead{GRB\tablenotemark{a}} &
\colhead{Group\tablenotemark{b}} &
\colhead{$L$ (ms)\tablenotemark{c}} &
\colhead{$F$ (s$^{-1}$)\tablenotemark{d}} &
\colhead{$\sigma_F$ (s$^{-1}$)\tablenotemark{e}}
}
\startdata
020214 & 3 & 42.4 $^{+ 56.7 }_{- 35.0 }$ & 8885.9 & 221.3 \\
020218 & 3 & 607.0 $^{+ 181.9 }_{- 205.4 }$ & 3630.7 & 92.9 \\
020302 & 3 & & 632.6 & 62.7 \\
020306 & 1 & 1.2 $^{+ 15.7 }_{- 17.3 }$ & 9003.0 & 867.4 \\
020311 & 3 & 641.9 $^{+ 570.7 }_{- 519.2 }$ & 1571.9 & 119.7 \\
020313 & 3 & & 891.2 & 89.3 \\
020315 & 3 & & 504.1 & 91.6 \\
020331 & 3 & & 307.2 & 71.8 \\
020407 & 3 & & 775.9 & 72.4 \\
020409 & 3 & & 268.6 & 56.0 \\
020413 & 3 & & 1153.5 & 130.8 \\
020417 & 3 & & 846.8 & 77.1 \\
020418 & 3 & 108.6 $^{+ 94.2 }_{- 93.1 }$ & 5618.8 & 289.6 \\
020426 & 1 & & 3873.5 & 436.2 \\
020430 & 3 & & 1209.8 & 100.9 \\
020509 & 3 & & 2785.5 & 310.0 \\
020524 & 3 & & 958.1 & 116.7 \\
020525A & 3 & 452.4 $^{+ 502.2 }_{- 2154.8 }$ & 975.4 & 159.5 \\
020525B & 1 & & 3265.2 & 526.6 \\
020527 & 2 & & 1467.1 & 278.8 \\
020602 & 3 & & 2643.6 & 503.4 \\
020603 & 3 & & 6473.9 & 1042.8 \\
020604 & 3 & & 1187.1 & 98.9 \\
020620 & 3 & & 1816.7 & 260.3 \\
020623 & 3 & & 962.8 & 169.9 \\
020630 & 3 & & 997.8 & 94.1 \\
020702 & 3 & & 803.6 & 98.2 \\
020708 & 3 & & 383.9 & 45.8 \\
020712 & 3 & & 724.5 & 115.8 \\
020715A & 1 & & 2449.4 & 397.6 \\
020715B & 3 & 135.7 $^{+ 55.8 }_{- 47.9 }$ & 10359.6 & 247.5 \\
020725 & 3 & & 3985.2 & 469.3 \\
020801 & 3 & & 1740.8 & 384.4 \\
020819A & 2 & 170.6 $^{+ 127.4 }_{- 109.2 }$ & 2235.2 & 353.0 \\
020819B & 3 & & 1014.4 & 96.3 \\
020828 & 1 & 6.3 $^{+ 84.3 }_{- 46.7 }$ & 5356.8 & 862.4 \\
020910 & 3 & & 1792.6 & 215.3 \\
020914 & 3 & & 1284.7 & 268.7 \\
020926 & 3 & & 618.9 & 63.2 \\
021008A & 3 & 12.5 $^{+ 17.6 }_{- 16.2 }$ & 54724.6 & 1415.5 \\
021008B & 3 & & 556.5 & 108.6 \\
021011 & 3 & & 771.1 & 157.9 \\
021016 & 3 & & 647.3 & 98.0 \\
021020 & 3 & & 2275.6 & 125.7 \\
021023 & 3 & & 1787.9 & 270.7 \\
021025 & 3 & & 645.3 & 91.9 \\
021102 & 3 & & 1796.7 & 112.4 \\
021105 & 3 & & 818.0 & 163.6 \\
021108 & 3 & & 1072.2 & 87.1 \\
021109 & 3 & & 903.3 & 85.7 \\
021113 & 3 & & 374.3 & 89.6 \\
021115 & 3 & & 484.9 & 89.2 \\
021119 & 3 & -393.8 $^{+ 3000.4 }_{- 2414.5 }$ & 2129.7 & 277.4 \\
021125 & 3 & & 1434.5 & 174.4 \\
021201 & 1 & 10.5 $^{+ 17.9 }_{- 22.5 }$ & 9868.9 & 1044.7 \\
021205 & 3 & & 579.2 & 39.4 \\
021206 & 3 & 8.9 $^{+ 4.0 }_{- 4.2 }$ & 78241.5 & 3719.6 \\
021211 & 3 & & 1649.6 & 409.6 \\
021214 & 3 & & 279.9 & 44.6 \\
021223 & 3 & & 1169.7 & 338.0 \\
021226 & 1 & 16.3 $^{+ 21.1 }_{- 27.9 }$ & 5997.7 & 403.9 \\
030102 & 3 & & 1125.4 & 99.1 \\
030103 & 3 & & 245.3 & 61.2 \\
030105 & 2 & 6.7 $^{+ 28.4 }_{- 22.7 }$ & 4873.4 & 465.0 \\
030110 & 1 & & 2791.2 & 714.4 \\
030115A & 3 & 328.0 $^{+ 2334.8 }_{- 2009.9 }$ & 1433.8 & 145.1 \\
030115B & 3 & & 970.4 & 214.2 \\
030127 & 3 & & 779.9 & 133.1 \\
030204 & 3 & 196.0 $^{+ 303.8 }_{- 349.4 }$ & 2093.2 & 68.3 \\
030206 & 1 & -6.6 $^{+ 56.8 }_{- 16.8 }$ & 3982.8 & 553.3 \\
030212 & 3 & & 726.2 & 175.5 \\
030214 & 3 & & 2164.6 & 168.9 \\
030216 & 3 & & 530.3 & 93.7 \\
030217 & 3 & & 5531.9 & 548.3 \\
030222 & 3 & & 772.4 & 89.4 \\
030223 & 3 & 1108.2 $^{+ 1740.4 }_{- 1544.0 }$ & 857.3 & 87.0 \\
030225 & 3 & 1338.1 $^{+ 4066.8 }_{- 3979.4 }$ & 566.8 & 67.7 \\
030227 & 3 & -1374.2 $^{+ 5702.5 }_{- 4829.1 }$ & 358.0 & 69.8 \\
030228 & 3 & & 1281.8 & 147.4 \\
030301 & 3 & & 381.6 & 93.5 \\
030306 & 3 & & 2740.5 & 135.1 \\
030307 & 3 & 248.3 $^{+ 63.8 }_{- 61.4 }$ & 7542.7 & 302.7 \\
030320A & 3 & 358.1 $^{+ 1341.7 }_{- 1042.7 }$ & 1782.3 & 198.9 \\
030320B & 3 & 415.8 $^{+ 1825.9 }_{- 1203.6 }$ & 622.6 & 29.4 \\
030326 & 3 & & 2034.3 & 116.4 \\
030328 & 3 & & 891.1 & 141.0 \\
030329A & 3 & 37.5 $^{+ 89.9 }_{- 97.7 }$ & 11876.3 & 219.7 \\
030329B & 3 & & 531.8 & 49.5 \\
030331 & 3 & & 1148.8 & 221.9 \\
030406 & 3 & 240.6 $^{+ 168.9 }_{- 169.7 }$ & 6067.2 & 283.3 \\
030410 & 2 & 23.1 $^{+ 64.6 }_{- 88.5 }$ & 2392.8 & 308.2 \\
030413 & 3 & -257.9 $^{+ 5958.8 }_{- 1027.3 }$ & 1181.4 & 109.1 \\
030414 & 3 & 1171.0 $^{+ 316.2 }_{- 258.2 }$ & 2599.3 & 96.8 \\
030419 & 3 & & 7826.4 & 226.3 \\
030421 & 3 & 69.0 $^{+ 166.4 }_{- 199.6 }$ & 4489.0 & 525.3 \\
030422 & 3 & & 700.1 & 102.2 \\
030428 & 3 & 24.9 $^{+ 22.0 }_{- 27.6 }$ & 5294.8 & 190.8 \\
030501A & 3 & & 1961.3 & 146.9 \\
030501B & 3 & & 404.4 & 96.2 \\
030501C & 2 & & 3029.0 & 600.2 \\
030505A & 3 & & 584.9 & 112.9 \\
030505B & 3 & -267.8 $^{+ 749.1 }_{- 729.4 }$ & 1448.6 & 46.9 \\
030506 & 3 & & 1315.8 & 186.0 \\
030518A & 3 & 81.2 $^{+ 81.8 }_{- 107.4 }$ & 7982.3 & 640.6 \\
030518B & 3 & & 1867.0 & 248.6 \\
030519A & 3 & & 1890.9 & 318.7 \\
030519B & 3 & 17.0 $^{+ 17.2 }_{- 19.5 }$ & 15592.7 & 299.0 \\
030523 & 1 & & 2828.1 & 485.7 \\
030528 & 3 & & 423.7 & 69.7 \\
030601 & 3 & 465.6 $^{+ 679.7 }_{- 929.7 }$ & 1051.9 & 98.4 \\
030614 & 3 & & 898.5 & 32.7 \\
030626 & 3 & 333.6 $^{+ 714.2 }_{- 772.6 }$ & 1434.1 & 78.0 \\
030703 & 3 & & 229.4 & 54.0 \\
030706 & 3 & & 1021.3 & 101.4 \\
030710 & 3 & & 1114.7 & 97.7 \\
030714 & 3 & & 1499.3 & 129.3 \\
030716 & 3 & & 377.2 & 68.3 \\
030721 & 3 & 59.9 $^{+ 659.6 }_{- 318.1 }$ & 12474.1 & 679.8 \\
030725 & 3 & & 1014.1 & 93.0 \\
030726A & 3 & 57.8 $^{+ 166.1 }_{- 129.2 }$ & 2005.1 & 94.7 \\
030726B & 3 & & 400.9 & 51.9 \\
030728 & 3 & & 655.4 & 127.2 \\
030824 & 3 & & 509.0 & 86.0 \\
030827 & 3 & 18.4 $^{+ 74.2 }_{- 54.8 }$ & 3678.2 & 355.2 \\
030830 & 3 & & 1835.5 & 87.4 \\
030831 & 3 & & 1903.3 & 123.3 \\
030919 & 3 & & 828.7 & 143.0 \\
030921 & 3 & & 2414.9 & 127.7 \\
030922A & 3 & & 1418.7 & 111.6 \\
030922B & 3 & 38.7 $^{+ 164.4 }_{- 146.0 }$ & 4042.1 & 171.3 \\
030926 & 1 & & 2094.0 & 422.3 \\
031005 & 3 & & 661.9 & 132.6 \\
031019 & 3 & & 1260.8 & 160.4 \\
031024 & 3 & 218.6 $^{+ 394.5 }_{- 421.0 }$ & 3055.6 & 552.4 \\
031027 & 3 & 128.2 $^{+ 111.0 }_{- 125.4 }$ & 4102.5 & 113.9 \\
031107 & 3 & -121.0 $^{+ 1243.7 }_{- 1523.6 }$ & 1183.0 & 86.4 \\
031108 & 3 & 88.9 $^{+ 69.3 }_{- 82.9 }$ & 5923.9 & 157.4 \\
031111 & 3 & 57.1 $^{+ 28.6 }_{- 31.6 }$ & 13882.4 & 445.7 \\
031118 & 1 & & 4669.8 & 592.7 \\
031120 & 3 & 1147.6 $^{+ 637.5 }_{- 1068.6 }$ & 991.2 & 57.3 \\
031127 & 3 & & 634.4 & 94.5 \\
031130 & 3 & & 1354.9 & 155.0 \\
031214 & 3 & & 609.8 & 159.3 \\
031218 & 1 & 53.4 $^{+ 137.0 }_{- 48.9 }$ & 4732.5 & 1067.7 \\
031219 & 3 & 334.0 $^{+ 589.0 }_{- 350.8 }$ & 3262.1 & 439.7 \\
031226 & 3 & & 640.4 & 72.5 \\
031226 & 3 & & 612.8 & 86.8 \\
040102 & 3 & & 1160.1 & 139.1 \\
040108 & 3 & & 395.7 & 70.3 \\
040113 & 3 & & 1183.6 & 193.7 \\
040115 & 3 & & 473.1 & 84.0 \\
040125 & 3 & & 528.6 & 111.7 \\
040205A & 3 & & 497.4 & 152.7 \\
040205B & 3 & & 378.0 & 76.3 \\
040207 & 3 & 23.6 $^{+ 91.0 }_{- 84.4 }$ & 3375.9 & 115.9 \\
040211 & 3 & & 751.8 & 181.5 \\
040215 & 3 & & 391.7 & 46.9 \\
040220 & 3 & 1041.1 $^{+ 1167.5 }_{- 1171.1 }$ & 1714.0 & 178.9 \\
040225A & 3 & & 576.5 & 104.0 \\
040225B & 3 & & 642.5 & 90.0 \\
040228 & 3 & 19.2 $^{+ 39.5 }_{- 36.0 }$ & 11483.8 & 261.6 \\
040302A & 3 & & 712.0 & 98.2 \\
040302B & 3 & 101.8 $^{+ 47.2 }_{- 32.4 }$ & 13904.2 & 283.4 \\
040303 & 3 & & 403.8 & 86.2 \\
040312 & 1 & 2.7 $^{+ 27.4 }_{- 14.8 }$ & 7197.9 & 690.1 \\
040316 & 3 & -161.3 $^{+ 316.9 }_{- 380.0 }$ & 4356.0 & 452.4 \\
040323 & 3 & & 650.8 & 174.9 \\
040324 & 1 & 2.7 $^{+ 4.5 }_{- 6.5 }$ & 16984.5 & 1112.0 \\
040327 & 3 & & 405.8 & 56.2 \\
040329 & 2 & 3.6 $^{+ 9.2 }_{- 9.6 }$ & 19974.8 & 849.1 \\
040330 & 3 & & 584.5 & 98.3 \\
040404 & 3 & & 1096.6 & 183.6 \\
040413 & 1 & & 5163.3 & 594.7 \\
040414 & 3 & & 992.3 & 59.5 \\
040421 & 3 & 98.6 $^{+ 68.9 }_{- 75.1 }$ & 7197.3 & 220.2 \\
040423 & 3 & & 663.4 & 111.5 \\
040425 & 3 & 155.3 $^{+ 139.1 }_{- 126.2 }$ & 7424.3 & 843.8 \\
040427 & 3 & & 674.8 & 112.1 \\
040429 & 3 & & 582.8 & 85.4 \\
040502A & 3 & & 3551.8 & 310.5 \\
040502B & 3 & & 602.9 & 48.9 \\
040506 & 3 & & 807.0 & 61.2 \\
040508 & 3 & & 242.0 & 64.4 \\
040510 & 3 & & 1097.3 & 123.3 \\
040513 & 3 & & 218.9 & 49.2 \\
040526 & 3 & & 499.1 & 96.2 \\
040528 & 3 & 731.5 $^{+ 1067.9 }_{- 1191.3 }$ & 1827.0 & 119.2 \\
040531 & 3 & & 1024.5 & 75.3 \\
040601 & 3 & & 401.3 & 93.9 \\
040603A & 3 & & 726.3 & 138.6 \\
040603B & 3 & & 161.9 & 26.5 \\
040605A & 3 & & 800.3 & 224.4 \\
040605B & 1 & 26.0 $^{+ 45.0 }_{- 50.8 }$ & 8657.1 & 1243.4 \\
040605C & 3 & & 1271.6 & 87.0 \\
040611 & 3 & -2267.0 $^{+ 2983.7 }_{- 1345.5 }$ & 740.1 & 87.8 \\
040619 & 3 & & 3308.1 & 480.5 \\
040701 & 3 & 135.0 $^{+ 245.2 }_{- 318.9 }$ & 1128.0 & 103.9 \\
040719 & 3 & & 1445.1 & 177.0 \\
040723 & 3 & & 4168.6 & 697.7 \\
040731 & 3 & 110.2 $^{+ 244.6 }_{- 257.4 }$ & 1803.4 & 91.5 \\
040803 & 3 & & 267.3 & 39.1 \\
040810 & 3 & & 1654.2 & 109.7 \\
040818 & 3 & 33.2 $^{+ 97.4 }_{- 153.4 }$ & 2681.5 & 225.6 \\
040822 & 2 & & 2226.7 & 374.5 \\
040824 & 3 & & 282.2 & 45.8 \\
040921 & 1 & & 2619.0 & 490.6 \\
040925 & 3 & -136.0 $^{+ 2908.9 }_{- 1745.9 }$ & 1263.8 & 167.1 \\
040926 & 3 & 4.1 $^{+ 92.7 }_{- 83.7 }$ & 5855.8 & 207.3 \\
041003 & 3 & & 735.4 & 182.2 \\
041006 & 3 & & 1307.7 & 254.1 \\
041007 & 2 & 101.0 $^{+ 119.5 }_{- 123.3 }$ & 3301.3 & 238.6 \\
041009 & 3 & & 1382.6 & 111.5 \\
041010 & 1 & 0.7 $^{+ 6.3 }_{- 8.0 }$ & 1838.8 & 616.2 \\
041012 & 3 & & 276.6 & 44.5 \\
041013A & 3 & & 530.3 & 39.7 \\
041013B & 1 & & 3412.2 & 421.8 \\
041015 & 3 & & 1630.8 & 189.7 \\
041016 & 3 & & 509.0 & 78.5 \\
041018 & 3 & 6774.4 $^{+ 6756.0 }_{- 7424.5 }$ & 717.6 & 120.4 \\
041101 & 3 & & 1036.3 & 167.5 \\
041102 & 3 & & 1437.3 & 188.3 \\
041107 & 3 & & 883.0 & 83.3 \\
041116 & 3 & & 269.7 & 52.1 \\
041117 & 3 & & 1165.2 & 106.5 \\
041120 & 3 & & 1167.1 & 123.6 \\
041125 & 3 & -7.8 $^{+ 41.0 }_{- 55.4 }$ & 9489.2 & 168.7 \\
041202 & 3 & 76.3 $^{+ 102.6 }_{- 93.7 }$ & 5824.1 & 198.0 \\
041211A & 3 & 56.3 $^{+ 657.3 }_{- 825.0 }$ & 1066.9 & 83.9 \\
041211B & 3 & & 5126.6 & 629.1 \\
041211C & 3 & -6.0 $^{+ 12.9 }_{- 12.9 }$ & 11832.5 & 315.0 \\
041213 & 1 & & 6872.1 & 669.2 \\
041218 & 3 & & 364.9 & 54.7 \\
041219 & 3 & & 560.3 & 76.9 \\
041223 & 3 & -1.6 $^{+ 208.1 }_{- 211.7 }$ & 1567.8 & 91.6 \\
041224 & 3 & & 285.4 & 73.2 \\
041231 & 2 & 21.6 $^{+ 187.2 }_{- 177.7 }$ & 2211.1 & 206.3 \\
050124 & 3 & & 815.2 & 162.3 \\
050126 & 3 & & 1477.0 & 79.6 \\
050203 & 3 & & 2870.6 & 234.3 \\
050213 & 3 & 44.2 $^{+ 197.8 }_{- 194.9 }$ & 2174.0 & 101.5 \\
050214 & 3 & & 390.4 & 60.3 \\
050216 & 1 & 6.0 $^{+ 99.4 }_{- 76.5 }$ & 4614.1 & 686.4 \\
050219 & 3 & 410.8 $^{+ 321.9 }_{- 186.9 }$ & 4760.8 & 259.7 \\
050311 & 3 & & 465.1 & 88.4 \\
050312 & 1 & 4.4 $^{+ 44.5 }_{- 31.0 }$ & 4439.8 & 419.4 \\
050314 & 3 & & 2101.0 & 151.0 \\
050320 & 3 & & 838.7 & 114.5 \\
050321 & 3 & & 880.5 & 128.4 \\
050326 & 3 & & 3501.9 & 403.9 \\
050328 & 1 & -43.5 $^{+ 79.4 }_{- 84.1 }$ & 12706.3 & 1069.6 \\
050404 & 3 & 29.1 $^{+ 35.6 }_{- 39.0 }$ & 6336.6 & 215.9 \\
050409 & 2 & -1.7 $^{+ 16.9 }_{- 14.1 }$ & 18969.8 & 1936.7 \\
050411 & 3 & & 813.9 & 125.9 \\
050412 & 3 & & 1455.7 & 140.8 \\
050429 & 3 & & 2018.6 & 145.8 \\
050430 & 3 & & 986.8 & 139.4 \\
050501 & 3 & & 1697.9 & 308.8 \\
050502 & 2 & & 1132.2 & 235.6 \\
050509 & 3 & -63.6 $^{+ 397.4 }_{- 494.3 }$ & 2282.4 & 145.7 \\
050516 & 3 & & 259.7 & 71.5 \\
050525A & 3 & & 4853.5 & 259.6 \\
050525B & 3 & 8.5 $^{+ 50.0 }_{- 50.3 }$ & 7311.6 & 214.3 \\
050528 & 3 & & 421.3 & 48.7 \\
050530 & 3 & & 1963.1 & 273.3 \\
050531 & 3 & 56.6 $^{+ 167.3 }_{- 163.6 }$ & 6082.8 & 197.2 \\
050614 & 3 & & 377.5 & 58.6 \\
050701 & 3 & & 1173.5 & 166.1 \\
050702 & 2 & & 951.9 & 187.8 \\
050703 & 3 & & 2734.2 & 305.4 \\
050706 & 3 & & 1620.7 & 168.6 \\
050713A & 3 & -233.3 $^{+ 1804.9 }_{- 2189.4 }$ & 1450.6 & 244.4 \\
050713B & 3 & & 431.1 & 69.1 \\
050715 & 3 & & 2053.4 & 198.9 \\
050717 & 3 & 110.3 $^{+ 151.6 }_{- 220.0 }$ & 2456.3 & 143.1 \\
050726 & 3 & 810.5 $^{+ 1751.7 }_{- 1334.3 }$ & 1480.4 & 90.7 \\
050729 & 3 & & 873.7 & 177.5 \\
050802 & 3 & & 498.5 & 97.6 \\
050805 & 2 & -6.2 $^{+ 30.0 }_{- 20.0 }$ & 3355.3 & 334.6 \\
050809 & 3 & & 4598.3 & 699.3 \\
050813 & 3 & & 810.7 & 110.4 \\
050814 & 1 & & 5499.5 & 1658.3 \\
050817 & 3 & & 895.5 & 118.2 \\
050820 & 3 & & 681.3 & 101.9 \\
050824 & 1 & 1.2 $^{+ 3.2 }_{- 4.0 }$ & 7474.8 & 930.5 \\
050825 & 2 & -82.6 $^{+ 132.9 }_{- 71.3 }$ & 4307.4 & 585.3 \\
050902 & 3 & & 631.9 & 149.5 \\
050923 & 3 & & 1872.2 & 395.3 \\
051009 & 3 & & 475.8 & 66.9 \\
051012 & 3 & 462.5 $^{+ 443.7 }_{- 207.9 }$ & 3780.0 & 331.8 \\
051021 & 3 & -97.8 $^{+ 716.9 }_{- 495.2 }$ & 2117.7 & 221.7 \\
051031 & 3 & & 678.7 & 45.4 \\
051101 & 3 & & 921.2 & 258.0 \\
051103 & 1 & 0.6 $^{+ 2.4 }_{- 2.8 }$ & 135199.6 & 10873.8 \\
051109 & 3 & & 882.0 & 92.0 \\
051111 & 3 & & 516.1 & 102.1 \\
051117 & 3 & -224.8 $^{+ 2133.4 }_{- 3318.9 }$ & 942.4 & 133.9 \\
051119 & 3 & & 743.4 & 133.7 \\
051124A & 3 & & 1234.2 & 194.2 \\
051124B & 3 & & 2715.0 & 152.8 \\
051201A & 3 & & 211.7 & 37.0 \\
051201B & 3 & & 1000.9 & 170.4 \\
051207 & 3 & & 2440.2 & 121.2 \\
051211 & 3 & 592.3 $^{+ 836.1 }_{- 914.9 }$ & 1573.5 & 69.6 \\
051217 & 3 & & 458.7 & 81.7 \\
051220A & 3 & 39.0 $^{+ 24.1 }_{- 26.5 }$ & 19918.0 & 658.7 \\
051220B & 3 & & 321.2 & 73.1 \\
051221 & 1 & 0.0 $^{+ 6.6 }_{- 8.7 }$ & 15280.2 & 1298.8 \\
051222 & 3 & & 334.4 & 78.5 \\
060101 & 3 & 599.3 $^{+ 655.1 }_{- 778.6 }$ & 1647.1 & 94.2 \\
060110 & 3 & & 445.6 & 107.8 \\
060111 & 3 & -2346.6 $^{+ 2031.1 }_{- 1648.5 }$ & 1079.4 & 46.7 \\
060117 & 3 & & 1300.6 & 113.7 \\
060121A & 3 & & 2980.4 & 277.6 \\
060121B & 3 & & 343.0 & 46.7 \\
060123 & 3 & 18.4 $^{+ 624.7 }_{- 529.7 }$ & 3965.2 & 184.2 \\
060124 & 3 & & 713.2 & 76.3 \\
060130 & 3 & & 2285.1 & 376.4 \\
060203 & 1 & -23.5 $^{+ 46.5 }_{- 16.1 }$ & 6089.8 & 831.6 \\
060217 & 3 & & 2432.4 & 416.0 \\
060224 & 3 & -944.0 $^{+ 3150.4 }_{- 1486.0 }$ & 851.0 & 109.2 \\
060228 & 3 & -702.0 $^{+ 1896.9 }_{- 1770.9 }$ & 717.4 & 89.8 \\
060303 & 1 & 21.2 $^{+ 46.3 }_{- 53.5 }$ & 9343.4 & 999.5 \\
060306 & 3 & 50.3 $^{+ 12.5 }_{- 11.0 }$ & 105153.0 & 3272.9 \\
060309 & 3 & & 355.6 & 62.1 \\
060312A & 1 & & 1526.6 & 337.6 \\
060312B & 3 & & 299.2 & 71.4 \\
060313 & 3 & & 602.2 & 103.3 \\
060323 & 3 & -186.2 $^{+ 270.0 }_{- 253.4 }$ & 3754.8 & 169.1 \\
060325 & 3 & 189.3 $^{+ 235.1 }_{- 266.9 }$ & 6080.4 & 638.4 \\
060401 & 3 & 110.2 $^{+ 268.0 }_{- 207.9 }$ & 2527.5 & 208.8 \\
060408 & 3 & & 449.7 & 85.1 \\
060415 & 3 & & 315.7 & 78.6 \\
060418 & 3 & & 694.3 & 74.4 \\
060421A & 3 & & 789.4 & 78.5 \\
060421B & 3 & 312.3 $^{+ 208.1 }_{- 170.2 }$ & 2812.8 & 106.2 \\
060425 & 1 & 5.3 $^{+ 4.3 }_{- 4.8 }$ & 2367.7 & 521.1 \\
060428 & 3 & & 274.9 & 60.2 \\
060429 & 1 & 3.2 $^{+ 14.7 }_{- 14.5 }$ & 20278.7 & 2240.6 \\
060505 & 3 & 277.1 $^{+ 5726.4 }_{- 1422.9 }$ & 1384.9 & 256.7 \\
060528 & 3 & 2441.1 $^{+ 4000.4 }_{- 4289.8 }$ & 656.8 & 77.5 \\
060530 & 3 & & 889.0 & 143.9 \\
060610 & 1 & 10.0 $^{+ 14.0 }_{- 21.1 }$ & 9569.6 & 748.7 \\
060614 & 3 & & 498.7 & 40.7 \\
060622 & 3 & -1510.2 $^{+ 1932.3 }_{- 1524.6 }$ & 1038.4 & 71.5 \\
060624 & 3 & & 9648.0 & 610.3 \\
060625 & 3 & & 1179.5 & 122.8 \\
060630 & 3 & & 1377.2 & 83.7 \\
060708 & 1 & -5.6 $^{+ 21.8 }_{- 20.4 }$ & 16293.3 & 1641.8 \\
060729 & 3 & & 588.4 & 100.8 \\
060805 & 3 & 18.3 $^{+ 54.8 }_{- 69.6 }$ & 10612.2 & 474.3 \\
060811 & 3 & & 1742.2 & 103.9 \\
060819 & 3 & & 949.2 & 159.3 \\
060823 & 2 & & 811.4 & 167.0 \\
060919 & 3 & & 368.6 & 61.5 \\
060920 & 3 & 22.4 $^{+ 61.9 }_{- 80.2 }$ & 4809.8 & 181.2 \\
060925 & 3 & & 1840.6 & 114.1 \\
060928 & 3 & 144.7 $^{+ 154.7 }_{- 151.5 }$ & 5934.5 & 281.3 \\
061005 & 3 & & 2187.7 & 239.8 \\
061006A & 1 & 9.4 $^{+ 167.7 }_{- 123.0 }$ & 6404.0 & 940.7 \\
061006B & 2 & & 2359.0 & 286.1 \\
061007 & 3 & 66.2 $^{+ 170.0 }_{- 201.1 }$ & 3465.1 & 305.1 \\
061012 & 3 & & 1474.0 & 171.6 \\
061013 & 3 & & 776.2 & 102.5 \\
061014 & 1 & & 2308.8 & 291.6 \\
061022 & 3 & & 392.3 & 77.3 \\
061031 & 3 & & 1112.8 & 238.5 \\
061101 & 3 & & 631.7 & 88.3 \\
061108 & 3 & & 2267.8 & 219.2 \\
061113 & 3 & 36.4 $^{+ 67.3 }_{- 89.2 }$ & 3665.0 & 233.6 \\
061117 & 3 & & 251.6 & 56.0 \\
061121 & 3 & -57.8 $^{+ 216.2 }_{- 184.8 }$ & 3131.4 & 176.9 \\
061123 & 3 & & 1360.8 & 172.8 \\
061126 & 3 & 194.3 $^{+ 166.7 }_{- 194.4 }$ & 2897.4 & 120.5 \\
061128 & 1 & -3.0 $^{+ 14.5 }_{- 19.2 }$ & 15183.0 & 1310.9 \\
061205 & 3 & & 427.5 & 83.8 \\
061212 & 3 & 13.0 $^{+ 19.8 }_{- 21.4 }$ & 9820.5 & 440.1 \\
061222 & 3 & & 1367.9 & 263.2 \\
061229 & 3 & & 680.4 & 68.7 \\
061230 & 3 & & 641.3 & 133.8 \\
070113 & 1 & & 1986.8 & 377.5 \\
070116 & 3 & & 736.2 & 111.8 \\
070120 & 3 & & 395.8 & 70.0 \\
070121 & 3 & & 178.8 & 49.4 \\
070125 & 3 & -9.6 $^{+ 167.9 }_{- 207.8 }$ & 4841.5 & 149.0 \\
070214 & 3 & & 425.7 & 98.3 \\
070220 & 3 & & 972.7 & 103.0 \\
070221 & 3 & & 511.0 & 103.0 \\
070307 & 3 & & 502.2 & 56.2 \\
070402 & 3 & & 829.0 & 113.1 \\
070420 & 3 & & 509.7 & 87.5 \\
070508 & 3 & & 1627.4 & 168.9 \\
070516 & 1 & 28.9 $^{+ 59.5 }_{- 70.1 }$ & 2376.6 & 268.6 \\
070531 & 3 & & 149.7 & 34.0 \\
070614 & 1 & 2.4 $^{+ 29.2 }_{- 24.8 }$ & 2874.7 & 275.5 \\
070622 & 3 & 39.9 $^{+ 57.8 }_{- 42.9 }$ & 3397.0 & 207.5 \\
070626 & 3 & & 2058.8 & 111.6 \\
070710 & 3 & & 379.1 & 66.7 \\
070717 & 3 & & 347.5 & 59.0 \\
070722 & 3 & & 321.7 & 77.4 \\
070724 & 3 & & 259.1 & 37.5 \\
070802 & 3 & 15.8 $^{+ 48.0 }_{- 58.1 }$ & 2426.5 & 234.0 \\
070817 & 3 & & 541.2 & 55.5 \\
070819 & 3 & & 915.7 & 75.6 \\
070821 & 3 & 663.0 $^{+ 825.1 }_{- 764.1 }$ & 1956.7 & 77.2 \\
070824 & 2 & -5.9 $^{+ 120.0 }_{- 109.8 }$ & 3276.6 & 284.3 \\
070825 & 3 & 875.1 $^{+ 1116.6 }_{- 1635.1 }$ & 1335.1 & 94.6 \\
070917 & 3 & & 347.2 & 65.7 \\
071013 & 3 & & 963.7 & 211.9 \\
071014 & 3 & & 1094.5 & 155.0 \\
071030 & 3 & & 570.0 & 82.8 \\
071104 & 3 & & 747.0 & 98.2 \\
071204 & 1 & & 2901.5 & 297.5 \\
071217 & 3 & & 814.3 & 143.8 \\
080114 & 3 & 285.7 $^{+ 233.2 }_{- 252.3 }$ & 2321.1 & 144.3 \\
080202 & 3 & & 462.1 & 75.7 \\
080204 & 3 & 60.5 $^{+ 292.9 }_{- 244.0 }$ & 1368.6 & 180.0 \\
080211 & 3 & 1139.6 $^{+ 2898.5 }_{- 1804.8 }$ & 1349.4 & 143.3 \\
080218 & 3 & & 404.1 & 85.2 \\
080224 & 3 & & 1795.1 & 186.7 \\
080318 & 3 & & 385.3 & 90.3 \\
080319 & 3 & 157.9 $^{+ 551.1 }_{- 1112.4 }$ & 885.8 & 111.1 \\
080320 & 3 & 34.0 $^{+ 381.6 }_{- 569.9 }$ & 1722.6 & 110.4 \\
080328 & 3 & & 886.7 & 47.1 \\
080330 & 3 & 26.3 $^{+ 495.1 }_{- 350.8 }$ & 1685.0 & 98.1 \\
080408 & 2 & 73.3 $^{+ 133.6 }_{- 230.4 }$ & 2546.1 & 353.7 \\
080413 & 3 & & 544.1 & 141.2 \\
080425 & 3 & & 674.6 & 120.5 \\
\enddata
\tablenotetext{a}{RHESSI GRB number.}
\tablenotetext{b}{The assignment to the GRB group: 1 - short,\\
2 - intermediate, 3 - long.}
\tablenotetext{c}{Spectral lags were calculated from the difference
of the count light-curves at the energy intervals $400-1500$\,keV and $25-120$\,keV.
The errors are composed of the 95\,\%~CL statistical uncertainty and the light-curve's time resolution.}
\tablenotetext{d}{Peak-count rates derived in the band $25-1500$\,keV.}
\tablenotetext{e}{One sigma statistical uncertainties of the peak-count rates.}
\end{deluxetable}
\acknowledgments
We wish to thank Z. Bagoly, L.G. Bal\'azs, I. Horv\'ath and P. M\'esz\'aros
for useful discussions and comments on the manuscript.
We also thank R. Aptekar and T.L. Cline for providing the Konus-Wind data.
Thanks are also due to the anonymous referees for their valuable remarks.
This study was supported by the OTKA grant K77795,
by the Grant Agency of the Czech Republic grant No.
P209/10/0734, by the Research Program MSM0021620860 of the Ministry
of Education of the Czech Republic, and by Creative Research Initiatives
(RCMST) of MEST/NRF and the World Class University grant no R32-2008-000-101300.
|
{
"timestamp": "2012-06-28T02:02:07",
"yymm": "1206",
"arxiv_id": "1206.6198",
"language": "en",
"url": "https://arxiv.org/abs/1206.6198"
}
|
\section{Proposed Methods}
Friedman et al.~\cite{Friedman00} showed how boosting~\cite{Schapire98} can be seen as a way of fitting an additive model,
$f_{M}({\bf x}) = \sum_{m=1}^M \beta_m \:\psi({\bf x};\delta_m)$
where $\beta_m$, $m=1,2,\ldots,M$ are the expansion coefficients, and $\psi({\bf x};\delta_m) \in {\mathcal R}$ are the basis functions characterized by the parameters $\delta_m$, $m=1,2,\ldots,M$, and $M$ is the number of basis functions ($M=d_{max}$). This model is fit by minimizing a loss function averaged over the training data, that is:
$\min_{\{\beta_m,\delta_m\}^M_{1}} \sum_{i=1}^n \exp(-y_i f_M({\bf x}_i))$
where an exponential loss function is used. In forward stagewise additive modeling the basis functions are added one at a time, and the coefficient and the basis function parameter $(\beta_m,\delta_m)$ are optimized by keeping the coefficients and parameters of the previously chosen basis functions constant. That is, $\{\beta_m,\delta_m\}= \arg\min_{\beta,\delta} \sum_{i=1}^n \exp(-y_i (f_{m-1}({\bf x}_i)+\beta \psi({\bf x}_i;\delta))),\; m>1$. Friedman et al.~\cite{Friedman00} also presented a related loss function that is based on the binomial likelihood, given by: $\sum_{i=1}^n \log(1+\exp(-2y_i f_M({\bf x}_i)))$.
With this view, we consider the SGPC design as constructing a forward stagewise additive model.
Before we show the equivalences between the selection of a basis function and its coefficient to the selection of a basis vector $(j)$ and its site parameters ($p_j,m_j$), we define an objective function (called predictive loss function) that we propose to use to select a basis function and its coefficient in each iteration of the SGPC design algorithm:
\begin{equation}
\mbox{NLP}_{a}(\{\bu \cup j\}, \bftheta) = - {1 \over n} \sum_{i=1}^{n} \log \Phi\left(\frac{y_i (\hat{f}_i+b)}{\sqrt{1+{\bA}_{ii}}}\right)
\label{nlpboost1}
\end{equation}
where $j \in \buc$ and ${\hat f}_i, {\bA}_{ii}$ are computed using $\{\bu \cup j\}$. This objective function has a behavior similar to the exponential and binomial likelihood loss functions mentioned above, and is also an upper bound on the training set error. That is, we have:
\begin{equation}
{1 \over n} |\{i: sgn({\hat f}({\bf x}_i)+b) \ne y_i\}| \;\le\; {1 \over {\log(2) }} \;\mbox{NLP}_{a}({\bu}, \bftheta)
\label{inequality}
\end{equation}
Here the left hand side represents the training set error. The inequality follows from noting that $0\le \Phi(z)\le 1$, $\Phi(0)=0.5$, and that $-\log(\Phi(z))$ monotonically decreases in the interval $(-\infty,\infty)$. Note that $\log(\Phi(0))=-\log(2)$ and is required for appropriate scaling so that $-{{\log(\Phi(z))} \over {\log(2)}}\ge 1$ when $z\le 0$. Thus (\ref{nlpboost1}) is an upper bound on the training set error.
\noindent{{\bf Comparison of Objective Functions:}} Firstly, unlike the exponential loss function used in boosting, the function $\Phi(\cdot)$ is not separable. That is, the linear combination of basis functions that appear inside $\Phi(\cdot)$ cannot be written as a product of individual terms. This separability property of the exponential function is useful for the interpretation of building successive weak classifiers on the training data with weighted distribution. However, keeping all the previous basis functions with the associated coefficients fixed and, optimizing over {\it only} an additional basis function along with its coefficient using (\ref{nlpboost1}) essentially has the same desirable effect. It may be noted that like (\ref{nlpboost1}), the binomial log likelihood is not separable in strict sense (without any approximation). Secondly, the GP classifier has the advantage of providing predictive variance information which is useful in moderating the predictive probability. Specifically when the uncertainty or variance is large, this probability gets reduced accordingly. This is very important particularly when the data points are sparse in a certain region of the input space or when the data is noisy. Thus, use of (\ref{nlpboost1}) would be more robust. The behaviors of $-\log(\Phi(\cdot))$ with and without moderation along with the other loss functions are shown in Figure~\ref{FigA}.
\begin{figure}
\begin{center}
{
\vskip -0.2in
\includegraphics[width=6cm,height=4cm]{objfns_new.eps}
}
\vskip -0.05in
\caption{{\scriptsize Exponential, binomial log likelihood, $-{{\log(\Phi(\cdot))} \over{\log(2)}}$ functions with and without moderation. In the moderation case, the variance was set to 0.5. Zero variance corresponds to no moderation. A reference function that takes unit value is also shown.}}
\vspace{-0.4in}
\label{FigA}
\end{center}
\end{figure}
\noindent{\bf Forward Stagewise Additive Model View:} We now show using (\ref{eq:eqn5}) that the SGPC design using Algorithm 1 with (\ref{nlpboost1}) as the objective function (to select the basis vectors and their coefficients) is equivalent to building a forward stagewise additive model. In particular, a basis vector selection results in a basis function choice and the coefficient optimization essentially results in its site parameters estimation in each iteration (steps 3 and 4 in Algorithm 1). Note that the notions of stage and iteration in Algorithm 1 are equivalent. First, let us look at the steps 3 and 4 of Algorithm 1 more closely. After selecting a basis vector $j$ and updating its site parameters $(p_j,m_j)$ at the $t$-th iteration, the following posterior variance and mean update can be obtained by simplifying (\ref{eq:eqn5}):
\begin{equation}
\mbox{diag}(\bA)^{(t+1)} := \mbox{diag}(\bA)^{(t)} - \eta_j \tilde{\bf k}^2_{.,j}, \;\; \hat{\bbf}^{(t+1)} := \hat{\bbf}^{(t)} + {\tilde \alpha}_j \tilde{\bf k}_{.,j}
\label{eq:eqn6}
\end{equation}
where $\tilde{\bf k}_{.,j}=({\bf k}_{.,j}-{\bf k}_{.,\bu_{t}}\bPi^{1\over2}_{\bu_{t}}{\bf B}^{-1}_{\bu_{t}}\bPi^{1\over2}{\bf k}_{\bu_{t},.})$ and $\bu_t$ is the basis vector set at the $t$-th iteration. Here $\eta_j={{p_j}\over{1+p_j{\bA}^{(t)}_{jj}}}$ and ${\tilde{\alpha}}_j=\eta_j (m_j-{\hat f}^{(t)}_j)$. Note that $\eta_j\ge 0$. Then the process of adding the $j$th basis vector is equivalent to adding a basis function $\tilde{\bf k}({\bf x},{\bf x}_j)$. That is, we can define the additive model function for SGPC as: ${\hat f}^{(t+1)}({\bf x}) = {\hat f}^{(t)}({\bf x}) + {\tilde \alpha}_j {\tilde k}({\bf x},{\bf x}_j)$. Here, ${\tilde k}({\bf x},{\bf x}_j)=k({\bf x},{\bf x}_j)-{\bf k}({\bf x},{\bf x}_{\bu_{t}})\bPi^{1\over2}_{\bu_{t}}{\bf B}^{-1}_{\bu_{t}}\bPi^{1\over2}{\bf k}({\bf x}_{\bu_{t}},{\bf x})$ (where ${\bf k}({\bf x},{\bf x}_{\bu_{t}})$ is a row vector of size $|\bu_{t}|$), and is dependent on the input ${\bf x}_j$ through $k({\bf x},{\bf x}_j)$, the previously chosen functions and their site parameters. Note that in both the ADF approximation and the proposed methods, the site parameters of the previously selected basis vectors are not updated whenever a new basis vector is added. This is done to reduce the computational complexity. Next, we can see that the choice of $\tilde {\alpha}_j$ is dependent on the site parameters $m_j$ and $p_j$. This is because ${\hat f}^{(t)}_j$ and ${\bA}^{(t)}_{jj}$ are fixed once the $j$th basis vector is chosen. Now, relating ${\hat f}^{(t+1)}({\bf x})$ to the predictive mean vector in (\ref{eq:eqn6}), we see that the predictive mean vector is nothing but the evaluation of the function ${\hat f}^{(t+1)}({\bf x})$ for the training inputs ${\bf x}_i, i=1,\ldots,n$. 
Therefore, selection of the $j$th basis vector and estimation of its site parameters ($p_j,m_j)$ in each iteration (stage) of the SGPC design algorithm essentially determine the basis function ${\tilde k}({\bf x},{\bf x}_j)$ and its coefficient ${\tilde \alpha}_j$. To summarize, we have the final classifier function (excluding the bias hyperparameter $b$) and the predictive variance on an input ${\bf x}$ as:
\begin{eqnarray}
{\hat f}({\bf x})=\sum_{i=1}^{d_{max}} {\tilde \alpha}_i {\tilde k}({\bf x},{\bf x}_i) \\
{\hat \sigma}^2({\bf x}) = k({\bf x},{\bf x})-\sum_{i=1}^{d_{max}} \eta_i {\tilde k}({\bf x},{\bf x}_i)
\label{eq:sgpc}
\end{eqnarray}
Note that the expression for ${\hat \sigma}^2({\bf x})$ follows from the expression for $\mbox{diag}(\bA)^{(t+1)}$ on the left hand side of (\ref{eq:eqn6}). It is interesting to see that the variance is a non-increasing function as more and more basis functions are added. Having shown the equivalence, we next show how the $j$th basis function and the associated coefficient $\tilde {\alpha}_j$ can be obtained by optimizing (\ref{nlpboost1}) in each iteration. As we have seen before, the choice of a basis vector determines the basis function and we describe next how this selection is done.
\noindent{\bf Basis Vector Selection Method:} From efficiency viewpoint, we propose to select a basis vector as:
\begin{equation}
j =\arg\min_{i \in {\bf J}} \mbox{NLP}_{a}(\{\bu \cup i\}, \bftheta).
\label{iboost}
\end{equation}
where {\bf J}, a working set, is a randomly chosen subset of $\buc$, $|{\bf J}|$=min($\kappa$,$|\buc|$) and $\kappa$ can be set to 59~\cite{Smola01}. To select one basis vector using (\ref{iboost}) the computational cost is $O(\kappa nd_{max})$. Therefore a method to reduce the factor $\kappa$ without significantly degrading generalization performance will be very useful. We achieve this by changing the sampling strategy (from random sampling) used to construct the working set ${\bf J}$. In the proposed adaptive sampling technique, we construct ${\bf J}$ by sampling from $\buc$ according to a distribution that changes after a basis vector is added in each iteration. The sampling distribution is given by:
\begin{equation}
\chi^{(t+1)}_{j\in \buc_t} = {1 \over {V^{(t)}}} \Bigl(1 - \Phi\bigl({{y_j ({\hat f}^{(t)}_j+b)} \over \sqrt{{1+{\bf A}^{(t)}_{jj}}}}\bigr)\Bigr)
\label{distribution}
\end{equation}
where $V^{(t)}$ is a normalizing constant. Here, ${\hat {\bf f}}^{(t)}_j$ and ${\bf A}^{(t)}_{jj}$ are computed using the basis vectors in $\bu_t$. Since ${\hat f}$ and ${\bf A}$ change after inclusion of every basis vector in the inner loop, the distribution also changes and the sampling becomes adaptive.
To understand why such a sampling along with (\ref{iboost}) would be useful, we can see that if $\Phi(\cdot) \rightarrow 1$ (for a correctly classified example with high predictive probability), then the probability of selecting such an example as a basis vector will be relatively small. On the other hand, the probability of selecting a misclassified example with low predictive probability (that is, $\Phi(\cdot) \rightarrow 0$) will be relatively high. We found that selecting the most violated example (that is, the example with the least $\Phi(\cdot)$ in $\buc$) in each iteration results in poor basis vector selection for noisy and difficult datasets. The adaptive sampling technique can safeguard against such a selection and is robust across different datasets. Next, the sign of ${\tilde \alpha}_j$ in (\ref{eq:eqn6}) gets adjusted in such a way that ${\hat {\bf f}}^{(t+1)}$ moves in the desired direction for a given $\tilde {\bf k}_{.,j}$. This desired movement is expected to happen for all the examples having same class label that are close enough to the $j$th example. Therefore, with a choice of an example (having low value of $\Phi(\cdot)$), ${\hat {\bf f}}^{(t+1)}$ moving in the desired direction and variance $diag({\bf A})^{(t+1)}$ non-increasing, we expect the NLP value in (\ref{nlpboost1}) to improve particularly for the examples with wrong predictions or low predictive probability. In this sense the basis vector selection using (\ref{iboost}) and (\ref{distribution}) tends to mimic the selection of a base classifier in boosting~\cite{Schapire98} that minimizes the training set error with weighted distribution. This helps in getting a better generalization performance for a fixed $\kappa$ compared to random sampling. Alternatively, $\kappa$ can be reduced to get the same generalization performance. Experimental results support these claims.
\noindent{\bf Site Parameters Optimization Method:} Having constructed the working set {\bf J} using the adaptive sampling technique, we optimize (\ref{nlpboost1}) to find $\tilde{\alpha}_i$ for each basis vector $i\in {\bf J}$. As shown earlier optimizing over $\tilde {\alpha}_i$ is equivalent to optimizing over the site parameters $m_i$ and $p_i$ for a given basis vector. Essentially we have a two dimensional ($m_i,p_i)$ non-linear optimization problem. Note that it is a constrained optimization problem (under certain condition given below) since the posterior variance $diag(\bA)^{(t+1)}$ should be non-negative after every iteration. Assuming that $diag(\bA)^{(t)}$ is non-negative it turns out that $p_i$ must satisfy: $ {\tilde {\eta}}_i \ge \eta_i={{p_i}\over{1+p_i{\bf A}^{(t)}_{ii}}}$ where $\tilde {\eta}_i=min_l\;\{{{\bA^{(t)}_{ll}} \over {{\tilde k}^2_{l,i}}}\}$. On further simplification we find that if $\tilde {\eta}_i{\bA}^{(t)}_{ii}\ge 1$ then we have an unconstrained optimization problem (in $\tau_i$ when we work with $p_i=\exp(\tau_i)$); otherwise we have a constrained optimization problem with $0\le p_i \le {{\tilde {\eta}_i} \over {1-\tilde {\eta}_i{\bA}^{(t)}_{ii}}}$. This can be solved using any standard nonlinear optimization technique. {\it To summarize, we construct ${\bf J}$ using adaptive sampling, optimize $\tilde {\alpha}_i,\;\forall i\in {\bf J}$ and select the basis vector using (\ref{iboost}).}
\section{Introduction}
\label{introduction}
Sparse Gaussian Process (GP) classifier design aims at addressing the issues of high computational and storage costs associated with learning a full model GP ($O(n^3)$ and $O(n^2)$ respectively)\cite{Rasmussen06} using $n$ training examples, and involves using a representative data set, called the {\em basis vector} set, from the input space. In this way, the computational and memory requirements are reduced to $O(n d^2_{max})$ and $O(n d_{max})$ respectively, where $d_{max}$ is the size of the basis vector set ($d_{max} \ll n$). Further, the costs of predictive mean and variance computations for an example are reduced from $O(n)$ and $O(n^2)$ to $O(d_{max})$ and $O(d^2_{max})$ respectively.
In this work, we focus on developing an efficient Sparse Gaussian Process Classifier (SGPC) design algorithm. Several approaches have been proposed in the literature to design sparse GP classifiers. These include on-line GP learning \cite{Csato02} and entropy or information gain based Informative Vector Machine (IVM) \cite{Lawrence03,Seeger07}. Particularly relevant to this work is the IVM, which is inspired by the technique of assumed density filtering (ADF)~\cite{Minka01,Csato02}. In general, an SGPC design algorithm using the ADF approximation involves site parameter estimation, basis vector selection and hyperparameter optimization. While the site parameters are estimated using a moment matching technique in the ADF approximation, hyperparameters are estimated by optimizing marginal likelihood or negative logarithm of predictive probability (NLP)~\cite{Rasmussen06}. Different methods to select the basis vectors include entropy, information gain and validation based methods~\cite{Shirish08}. Experimental comparisons of the IVM with entropy based method and validation based method on various benchmark datasets showed that though the IVM method is efficient, it does not generalize well particularly on difficult datasets, and it requires a larger number of basis vectors to achieve similar generalization performance compared to the validation based method. Though the validation based method generalizes well, it is computationally expensive. Therefore, there is a need to have an efficient algorithm to design SGPCs that generalize well.
{\bf Contributions:}
Viewing SGPC design as construction of an additive model (that is, a linear combination of basis functions)~\cite{Friedman00}, a basis vector addition can be seen as adding a basis function in each iteration like in boosting~\cite{Schapire98}. With this view we introduce new methods to select the basis vectors and estimate their site parameters by optimizing a predictive loss function. These estimated site parameters determine the coefficient of the basis function in the additive model. Further, an adaptive sampling based basis vector selection method is proposed, which aids in effective basis vector selection and computational cost reduction. The proposed basis vector selection method has the same computational complexity as that used by the IVM. We also compare the generalization performance of various basis vector selection methods. Experimental results show that the proposed method gives comparable or better performance on a wide range of real-world large datasets. In particular, the proposed method is significantly better compared to the entropy and information gain based methods for relatively smaller $d_{max}$ values or on difficult datasets.
The paper is organized as follows. Section 2 presents an SGPC design algorithm with the ADF approximation. The proposed methods and implementation aspects are given in Section 3. Section 4 covers related work. Experimental results are presented in Section 5 and the paper concludes with Section 6.
\section{GP and Sparse GP Classification}
Given a training data set with input-output pairs $\calD=\{{\bf x}_i, y_i\}^{n}_{i=1}$ where ${\bf x}_i$ $\in$ $R^d$ and $y_i$ $\in$ $\{+1,-1\}$,
the goal is to design a GP classifier that generalizes well. In standard GPs for classification \cite{Rasmussen06}, true function value at each ${\bf x}_i$ is represented as a latent random variable $f({\bf x}_i)$. Let us denote $f({\bf x}_i)$ by $f_i$. The prior distribution of $\{{\bf f}({\bf X}_n)\}$ is a zero mean multivariate joint Gaussian, denoted as ${\it p}({\bf f})\:=\:{\mathcal N}(\cdot;{\bf 0},{\bf K})$, where ${\bf f}\:=\:[f_1,\ldots,f_n]^T$, ${\bf X}_n\:=\:[{\bf x}_1,\ldots,{\bf x}_n]$, and ${\bf K}$ is an $n \times n$ covariance matrix whose $(i,j)^{th}$ element is $k({\bf x}_i,{\bf x}_j)$.
An example covariance function is the squared exponential function: $k({\bf x}_i,{\bf x}_j)\:=\:v_0\:\exp(-\frac{1}{2} \sum_{m=1}^d \frac{{(x_{i,m}-x_{j,m})^2}}{\sigma^2})$. Here, $v_0$ and $\sigma^2$ denote the signal variance and kernel width respectively.
In this work we use the probit noise model, $p(y_i|f_i,\lambda,b)$ = $\Phi(\lambda y_i(f_i+b))$ where $\Phi(\cdot)$ is the cumulative distribution of the standard Gaussian ${\mathcal N}(\cdot;0,1)$ with zero mean and unit variance, the slope of which is controlled by $\lambda$($>$0) and $b$ is a bias hyperparameter. With independent, identical distribution assumption, we have ${\it p}({\bf y}|{\bf f},\bfgamma)\:=\:\prod_{i=1}^n p(y_i|f_i;\bfgamma)$ where $\bfgamma = [\lambda,\:\;b]$. Let $\bftheta \:=\:[v_0, \sigma^2, \bfgamma]$ denote the hyperparameters that characterize the GP model. With these modeling assumptions, the expressions for latent posterior and predictive distributions are available~\cite{Rasmussen06}.
In SGPC design using the ADF approximation \cite{Lawrence03}, a factorized form of $q_{\bu}(\bf f|\calD,\bftheta)$ (given below) is made use of, to build an
approximation to $p({\bf f}|{\calD},\bftheta)$ in an incremental fashion.
Let $\bu$ denote the index set of the training examples which are included
in the approximation. Then we have
\begin{equation}
q_{\bu}({\bbf}|\calD,\bftheta) \propto {\mathcal N}(\bbf;{\bf 0},{\bK}) \prod_{i \in {\bu}} \exp\left\{
-\frac{p_i}{2} {(f_i - m_i)}^2 \right\}
\label{qi}
\end{equation}
and $p({\bf f}|{\calD},\bftheta) \approx q_{\bu}({\bf f}|{\calD},\bftheta) = {\cal N}(\bbf;\hat{\bf f}, {\bf A})$ where ${\bf A} = {({\bf K}^{-1}+ \bPi)}^{-1}$ and $\hat{\bf f} = {\bf A} \bPi {\bm}$, ${\bf m} = {(m_1,\ldots,m_n)}^T$ and $\bPi = {\rm diag}{(p_1,\ldots,p_n)}$. The parameters $m_i$ and $p_i$, $i=1\rightarrow n$ are called the site function parameters and the set ${\bu}$ is called the {\em active} or {\em basis vector} set. Note that $\bu$ is actually associated with the inputs $\bX_{\bu}$. We refer to $\bu^c=\{1,2,\ldots,n\} \setminus {\bu}$ as the non-active set. In practice, the active set size $|\bu|$ is restricted by the user specified parameter, $d_{max}$. Note that the site function parameters corresponding to $\bu^c$ are zero. Thus a SGPC model is defined by the basis vector set ${\bu}$, its associated site function parameters $({\bf m}_{\bu},\bPi_{\bu})$ and the hyperparameters $\bftheta$. In general, SGPC design algorithms differ with respect to the basis vector selection, site parameter estimation and hyperparameters optimization methods. A typical SGPC design algorithm using the ADF approximation is given in Algorithm 1.
\begin{algorithm}
\caption{SGPC Design}
\begin{algorithmic}
\STATE 1. Initialize the hyperparameters $\bftheta$. Set $d_{max}$, $tol$, $iter_{max}$ and, $iter$=0.
\REPEAT
\STATE 2. Initialize ${\bf A} := {\bf K}, \bu = \{\}, \buc = \{ 1,2,\ldots,n\}, \hat{f}_i = p_i = m_i = 0 \; \forall \; i \in \buc$. $iter=iter+1$.
\REPEAT
\STATE 3. Select a basis vector $j$ from $\buc$ as per the chosen basis vector selection method.
\STATE 4. Update the site parameters $p_j$, $m_j$, posterior mean ($\hat{\bf f}$) and
variance ($\mbox{diag}(\bA)$).
\STATE 5. Set $\bu = \bu \cup \{j\}$ and $\buc = \buc \setminus \{j\}$.
\UNTIL{$|\bu| = d_{max}$}
\STATE 6. Re-estimate the hyperparameters $\bftheta$ by optimizing a suitable loss function, keeping $\bu$ and the corresponding site parameters constant.
\UNTIL{$iter = iter_{max}$ or change in the loss function value $<~tol$}
\vspace{-0.05in}
\end{algorithmic}
\end{algorithm}
We now briefly describe the ADF approximation method \cite{Lawrence03} to implement step 4. Suppose that an example index $j$ is added to the current basis vector set ${\bu}$. Let ${\bar \bu}_j=\bu \cup \{j\}$. After updating the site function parameters $p_j$ and $m_j$, incremental calculations are carried out to update $\bbfhat$ and $\mbox{diag}(\bA)$ corresponding to ${\bar \bu}_j$. This is achieved by maintaining two matrices $\bL$ and $\bM$ where $\bL$ is the lower-triangular Cholesky factor of ${\bf B}={\bI} + \bPi^{1/2}_{\bu,\bu} {\bK}_{\bu,\bu} \bPi^{1/2}_{\bu,\bu}$ and
${\bM} = {\bL}^{-1} \bPi_{\bu,\bu} {\bK}_{\bu,\cdot}$\footnote{\tiny{The subscript, $(\bu,\bu)$, of a matrix is used to represent the rows and columns of the matrix corresponding to the elements of the set $\bu$. The subscript, $(\bu,.)$ denotes the rows of the matrix corresponding to the elements of the set $\bu$.}}. Note that $\bA = \bK - {\bM}^T {\bM}$. However, only the diagonal elements of $\bA$ are needed in the algorithm and are updated as given in (\ref{eq:eqn5}) below. Assuming $\lambda=1$, with $z_j = \frac{y_j (\hat{f_j}+b)}{\sqrt{1+ A_{jj}}}, \;
\alpha_j = \frac{y_j {\mathcal N}(z_j;0,1)}{\Phi(z_j)} \sqrt{\frac{1}{1+A_{jj}}}, \;
\nu_j = \alpha_j \left( \alpha_j + \frac{(\hat{f}_j+b)}{1+A_{jj}} \right)$ the site function parameters are updated as:
\begin{equation}
p_j = \frac{\nu_j}{1-A_{jj} \nu_j},\;\; m_j = \hat{f_j} + \frac{\alpha_j}{\nu_j}.
\label{eq:eqn2}
\end{equation}
Let ${\bbl} = \sqrt{p_j} {\bM}_{\cdot,j},\;\; l = \sqrt{1+p_j \bK_{j,j} - {\bbl}^T{\bbl}}, \;\; \Mu = {l}^{-1} (\sqrt{p_j} \bK_{\cdot,j} - {\bM}^T {\bbl})$. Then $\bM$ is updated by appending the row vector ${\Mu}^T$ and $\bL$ is updated by appending $[{\bL}\:{\bf 0}]$ with $[{\bbl}^T\:l]$.
The posterior variance and mean are updated as:
\begin{equation}
\mbox{diag}(\bA) := \mbox{diag}(\bA) - \Mu^2, \;\; \hat{\bbf} := \hat{\bbf} + \alpha_j l p^{-1/2}_j \Mu.
\label{eq:eqn5}
\end{equation}
In (\ref{eq:eqn5}), ${\Mu}^2$ denotes squaring of each element in $\Mu$.
In the outer loop the hyperparameters are optimized by maximizing the marginal likelihood (ML)~\cite{Lawrence03}, $q_{\bu}({\bf y}|{\bf X},\bftheta)=\int p({\bf y}|{\bf f},\bfgamma) q_{\bu}({\bf f}|\calD,\bftheta) d{\bf f}$ or minimizing the negative logarithm of predictive probability (NLP) loss (under cumulative Gaussian noise model)~\cite{Shirish08},
\begin{equation}
\mbox{NLP}({\bu}, \bftheta) = -\frac{1}{|{\buc}|} \sum_{i \in \buc} \log \Phi\left(\frac{y_i (\hat{f}_i+b)}{\sqrt{1+{\bA}_{ii}}}\right).
\label{nlp}
\end{equation}
Finally the predictive target distribution for an unseen input $x_*$ is given by:
$q_{\bu}(y_\ast|{\bx}_\ast) = \Phi\left( \frac{y_\ast (\hat{f}_\ast + b)}{\sqrt{1+\sigma^2_\ast}}\right)$
where ${\hat f}_*={\bf k}_{*,\bu}\bPi^{1\over2}_{\bu}{\bf B}^{-1}\bPi^{1\over 2}_{\bu}{\bf m}_{\bu}$ and $\sigma^2_{*}=k({\bf x}_*,{\bf x}_*)-{\bf k}_{*,\bu}\bPi^{1\over2}_{\bu}{\bf B}^{-1}\bPi^{1\over 2}_{\bu}{\bf k}_{\bu,*}$. In the next section we propose new methods for effective basis vector selection and site parameters optimization (steps 3 and 4 in Algorithm 1).
\input{ProposedMethod}
\begin{figure*}
\begin{center}
{
\includegraphics[width=5.5cm,height=5cm]{waveform_boost_BOTH1.eps}
}
\hskip 0.1in
{
\includegraphics[width=5.5cm,height=5cm]{image_boost_BOTH1.eps}
}
\vskip -0.15in
\caption{{\scriptsize The left panel (group of 4 plots) corresponds to Waveform dataset and the right panel corresponds to Image dataset. The first and second rows show the training/test set errors and NLP loss as the basis vectors are added in the inner loop just before termination. The solid-red and dashed-blue lines correspond to $\tilde{\alpha}$ with moment matching and constrained optimization cases with ADF approximation. In this experiment we set $\kappa=2$.}}
\vspace{-0.4in}
\label{Fig2}
\end{center}
\end{figure*}
\section{Related work}
In this section we briefly describe three closely related methods that we compare with the proposed method.
In entropy based method~\cite{Lawrence03}, a basis vector is chosen according to the change in the entropy of the posterior process (\ref{qi}) after inclusion in the model and is given by: $j=\arg\min_{i\in\buc} \log({\bar \lambda}_i)$ where ${\bar \lambda}_i=1-\nu_i{\bf A}_{ii}$. In information gain based method~\cite{Seeger07}, a basis vector is chosen according to the information gain (which is defined as negative of the Kullback-Leibler divergence) obtained from the posterior process after inclusion in the model and is given by: $j=\arg\min_{i\in\buc} \{-\log({\bar \lambda}_i)+{1\over{{\bar \lambda}_i}}+{{({\hat f}^{'}_{i}-{\hat f}_i)^2} \over {\bf A}_{ii}}\}$. Here ${\hat f}_i$ and ${\hat f}^{'}_i$ denote the predictive mean before and after the inclusion of the $i$th basis vector. Compared to the entropy based selection, this method takes the predictive mean also into account and differs from the way ${\bar \lambda}_i$ is traded-off between the first and second term. Both these methods are very efficient since the relevant quantities that are needed to compute the appropriate measure for the basis vector selection are maintained throughout in the inner loop of Algorithm 1. Both these methods maximize the marginal likelihood for hyperparameter optimization.
In validation based method~\cite{Shirish08}, a working set ${\bf J} \subseteq \buc$ of fixed size $\kappa$ ($\kappa=\min(|\buc|, 59)$) is constructed by sampling randomly from $\buc$. The basis vector that minimizes (\ref{nlp}) is chosen and this involves computation of a new NLP value after inclusion for each $i\in {\bf J}$. Thus the computational cost for one basis vector selection is $O(\kappa n d_{max})$. The hyperparameters are selected by minimizing (\ref{nlp}).
While all the three methods use moment matching with the ADF approximation to estimate the site parameters (\ref{eq:eqn2}), the proposed site parameters optimization method provides an alternate way to estimate these parameters. Note that one can also use (\ref{eq:eqn2}) in conjunction with the proposed adaptive sampling based basis vector selection method. On comparing the objective functions used by the proposed method and validation based method, we see that the form of (\ref{nlp}) is the same as that of (\ref{nlpboost1}) except that the summation happens only over $\buc$. While the validation based method viewed (\ref{nlp}) as obtaining the NLP performance estimate with a validation set, (\ref{nlpboost1}) is motivated from the additive modeling viewpoint and, minimizing an upper bound on the training set error. Furthermore, the validation based method uses fixed uniform sampling instead of adaptive sampling. Note that the difference between (\ref{nlp}) and (\ref{nlpboost1}) is expected to be insignificant when $d_{max}\ll n$ (usually the case in SGPC design with large datasets) and this condition is important to avoid any overfitting.
\section{Experiments}
The summary of the datasets used in the experiments is given in Table 1. These datasets are part of Gunnar Raetsch's benchmark datasets available at \url{http://theoval.cmp.uea.ac.uk/~gcc/matlab/default.html}. We changed the training and test set sizes of the top five datasets in Table 1 to demonstrate the effectiveness of the proposed method on large datasets. For the first four datasets we picked the top $3600$ test examples from the original test set partition and added them to the training set. The remaining examples were used as the test set. Note that this construction however results in reduction of the test set size. In the case of the \textsf{Splice} dataset, we picked the top $1000$ examples from the test set partition. The modified train and test set sizes are shown in Table 1. We considered only the first 25 partitions of the first four datasets. In all the experiments we used the squared exponential covariance function and Algorithm 1 described in Section 2. A conjugate gradient method was used to optimize (\ref{nlp}) (unless otherwise specified) in the outer loop for optimizing the hyperparameters, and $iter_{max}$ was set to 20. We kept track of the best model based on the NLP loss value after every outer loop iteration. For comparison, we evaluated the test set error and NLP loss performance.
\begin{table}
\begin{center}
\vspace{-0.25in}
\caption{\scriptsize{Datasets Description. $n$ and $m$ denote the training and test set sizes. $d$ and $pt$ denote the input dimension and number of partitions.}}
\begin{small}
\begin{tabular}{|l|c|c|c|c|} \hline
Dataset & $n$ & $m$ & $d$ & $pt$ \\ \hline
\textsf{Banana} & 4000 & 1300 & 2 & 25 \\ \hline
\textsf{Waveform} & 4000 & 1000 & 21 & 25 \\ \hline
\textsf{Twonorm} & 4000 & 3400 & 20 & 25 \\ \hline
\textsf{Ringnorm} & 4000 & 3400 & 20 & 25 \\ \hline
\textsf{Splice} & 2000 & 1175 & 60 & 20 \\ \hline
\textsf{Image} & 1300 & 1010 & 18 & 20 \\ \hline
\end{tabular}
\vspace{-0.4in}
\end{small}
\end{center}
\label{Datasets}
\end{table}
We conducted three experiments. Due to the space constraints we present only selected results. In the first experiment we illustrate the effectiveness of the proposed method of site parameters (equivalently, $\tilde{\bfalp}$) optimization. The results on one partition of the \textsf{Waveform} and \textsf{Image} datasets are shown in Figure 2. This method is compared against using (\ref{eq:eqn2}) for site parameters optimization. Although some minor variations were seen between the two methods, statistical analysis showed that the performance differences were not significant. Thus, the constrained optimization is an effective alternate method to estimate the site parameters. We now discuss certain practical aspects of this optimization. During optimization, the variance can become zero (within numerical accuracy), for some choice of the hyperparameter values and, also due to the greedy nature of the basis vector selection method. While this can be handled in some way (for example by exiting the inner loop), optimizing over individual $\tilde {\alpha}_i$'s can become slightly expensive for large datasets. Note that the function and gradient computations are linear in $n$. We can control the optimization cost by restricting the number of function and gradient evaluations with some inaccuracy in the solution. Therefore, the proposed optimization is also efficient.
In the next two experiments, we kept the site parameter estimation (using (\ref{eq:eqn2})) and the hyperparameter estimation (using (\ref{nlp})) same, and only changed the basis vector selection method in the step 3 of Algorithm 1. This is because our goal here is to compare the quality of the different basis vector selection methods. First, we demonstrate the effectiveness of the adaptive sampling method in the basis vector selection. This is done by comparing it with random (uniform) sampling method. We conducted this experiment on all the datasets given in Table 1. The test set error and NLP loss performance results on two datasets are given in Figure~\ref{fig3} (left panel) for two different values of $d_{max}$. These results were obtained by averaging the performance over the partitions. We found that the adaptive sampling method consistently performed better across all the datasets, particularly with respect to the NLP loss measure. This is because the choice of the basis vectors made by the adaptive sampling method is based on the predictive distribution. We also observed improved test set error performance on several cases. It was also observed that the performance difference reduces as the working set size $\kappa$ increases. It can also be seen that $\kappa$ value of 2 is sufficient for the adaptive sampling method to get similar NLP generalization performance as the validation based method (see the second column in the left panel of Figure~\ref{fig3}).
In the third experiment, we compared the performance of the proposed method, validation based method, entropy and information gain based basis vector selection methods. In the case of proposed method, we evaluated the performance with $\kappa=$ 1 and 2, thus ensuring that the complexity for the basis vector selection is the same as that of the entropy and information gain based methods. We conducted this experiment for four different values of $d_{max}$ (40, 80, 160 and 320) on all the datasets given in Table 1. The test set error and NLP loss performance on three datasets are shown in Figure~\ref{fig3} (right panel). They were obtained by averaging the performance over the partitions. We compared the performance of various methods using statistical significance tests. We first conducted Wilcoxon test on the test set error and NLP loss obtained from the partitions, on each dataset. All the observations from the tests below are made at the significance level of 0.05. The results indicated better test set error and NLP performance of the proposed method over the entropy and information gain based methods on almost all the datasets. Specifically we observed that the proposed method performed better on difficult datasets (relatively higher test set errors) like \textsf{Banana}, \textsf{Waveform} and \textsf{Splice} for all values of $d_{max}$ with respect to (w.r.t.) both the measures. On \textsf{Twonorm} and \textsf{Ringnorm} datasets it performed better w.r.t. the NLP loss measure for all the values of $d_{max}$. While it performed better than the entropy based method on the \textsf{Ringnorm} dataset for all values of $d_{max}$ w.r.t. the test set error, the performance was the same at higher values of $d_{max}$ in other cases. The information gain based method performed better than the entropy based method on the \textsf{Banana}, \textsf{Waveform} and \textsf{Ringnorm} datasets. The entropy based method performed better than the information gain based method w.r.t. 
the test set error in the case of \textsf{Twonorm} dataset. We observed that the entropy based method performed better than all the methods at lower values of $d_{max}$ on the \textsf{Image} dataset. On comparing the proposed method ($\kappa=$ 1 and 2) with the validation based method, we found that the validation based method performed better w.r.t. the test set error at lower values of $d_{max}$ (40 and 80).
Next, following \cite{Demsar06}, we conducted Friedman's test with six datasets (Table 1) and four methods, namely, the proposed method (with $\kappa$=2), validation, entropy and information gain based methods. To conduct this test, we used the average test set error and NLP values obtained from averaging over the partitions. The p-values obtained for the test set error and NLP measure were (0.02, 0.04, 0.04, 0.39) and (0.002, 0.002, 0.01, 0.09) respectively for four different values of $d_{max}$ (40, 80, 160 and 320) in that order. When $d_{max}$ was 320, the results were not significantly different w.r.t. both the measures. Since the null hypothesis was rejected for $d_{max}$ values of 40, 80 and 160, we next conducted the Bonferroni-Dunn post-hoc test to compare the proposed method with the other three methods. This test revealed that there were no significant differences between the proposed and validation based methods for all values of $d_{max}$ w.r.t. both the measures. On comparing the proposed method with the entropy and information gain based methods, we found that while the results were not significantly different w.r.t. the test set error, they were significant w.r.t. the NLP measure for lower $d_{max}$ values at 0.1 level. Overall, it was seen that the p-value became larger and the performance differences across the methods reduced as $d_{max}$ was increased.
Except for the validation based method ($\kappa=$ 59), all the methods required almost the same computational time for the basis vector selection. An approximate timing measurement of one inner loop (for $d_{max}$=80) showed that the proposed method with $\kappa=$ 1 took approximately 20 seconds for the \textsf{Banana} dataset (on a machine with 2 GB of RAM and dual core Intel CPU running at $1.83$ GHz). In general, we found that the proposed method was $5$ times faster than the validation based method on almost all the datasets. This comparison was based on the Matlab implementations of these methods. The speed improvement was not as high as $59$. We believe that efficient matrix based operations in Matlab helped the validation based method significantly and, expect the speed improvement to be higher with implementations in other programming languages like C.
\section{Conclusion}
We considered the problem of designing an SGPC from an additive model estimator viewpoint. We introduced new methods for basis vector selection and site parameters estimation based on the predictive loss function. An adaptive sampling method that aids in effective basis vector selection and computational complexity reduction was proposed. The proposed basis vector selection method has the same computational and storage complexities as that used by IVM and is thus suitable for large datasets. The experimental results showed better generalization performance of the proposed method on several benchmark datasets, particularly for relatively smaller $d_{max}$ values or on difficult datasets.
\begin{figure}
\vskip -0.1in
\begin{center}
{
\includegraphics[width=5.5cm,height=5.5cm]{Kappa_Plots_Waveform_40_80_Twonorm_40_80.eps}
}
\hskip 0.1in
{
\includegraphics[width=5.5cm,height=5.25cm]{dmax1SetBar59_3.eps}
}
\vskip -0.1in
\caption{{\scriptsize \textsf{\bf {Left Panel of eight plots}}: Test set error and NLP loss performance of the random sampling (dashed-red-square) and adaptive sampling (solid-blue-circle) methods for different values of $\kappa$. The dashed-dot-black line corresponds to the validation based method with $\kappa$=59. Top two rows correspond to \textsf{Waveform} dataset for $d_{max}$=40 and 80 (in that order). The bottom rows correspond to \textsf{Twonorm} dataset for $d_{max}$=40 and 80. \textsf{{\bf Right Panel of six plots}}: Test set performance of the various basis vector selection methods (entropy, information-gain, proposed method with $\kappa$=1 and 2, and validation based method ($\kappa$=59) (different gray shades) in that order) for different values of $d_{max}$ (40, 80, 160 and 320 correspond to the x-axis values of 1, 2, 3 and 4 respectively). Each row corresponds to one dataset. The results on \textsf{Banana}, \textsf{Waveform} and \textsf{Twonorm} datasets are given in that order.}}
\label{fig3}
\end{center}
\end{figure}
|
{
"timestamp": "2012-06-27T02:04:59",
"yymm": "1206",
"arxiv_id": "1206.6030",
"language": "en",
"url": "https://arxiv.org/abs/1206.6030"
}
|
\section{Introduction}
Loop quantum cosmology~\cite{LivRev,Springer} aims to develop and analyze cosmological models by
incorporating crucial guidance from the full theory of loop quantum gravity~\cite{ALRev, Rov,ThomasRev}.
Even though its systems cannot yet be derived completely, they constitute more than a~set of minisuperspace
models.
Characteristic ef\/fects of quantum geometry have been found in this setting, and contact with a~potential
full framework of quantum gravity has allowed one to f\/ix some choices left open in traditional models of
quantum cosmology.
Nevertheless, ambiguities remain in loop quantum cosmology and loop quantum gravity, to be described by
suf\/f\/iciently general parameterizations that might be restricted by phenomenological analysis.
Also for these parameterizations, contact with the full theory is essential: many dif\/ferent features
collapse on one single parameter when geometry is restricted to exact homogeneity.
Disentangling dif\/ferent contributors to one ef\/fect is important for estimates of typical ranges of
parameters.
Ambiguities notwithstanding, some general features have been found, foremost among them quantum
hyperbolicity~\cite{Sing,BSCG}, a~mechanism of singularity avoidance based on discrete structures of
evolution operators.
However, ef\/fective geometrical pictures of resolution mechanisms are dif\/f\/icult to derive since
several dif\/ferent quantum ef\/fects contribute, in addition to higher-curvature corrections also
quantum-geometry modif\/ications.
(Space-time as we know it may not even exist in extreme quantum phases~\cite{Action, ModifiedHorizon}.)
Specif\/ic details of physics in deep quantum regimes, for instance the values of upper bounds on the
energy density of matter, as another possible indicator of singularity resolution, are often unreliable
because the setting remains too reduced and too ambiguous.
An appropriate viewpoint is one akin to ef\/fective f\/ield theories, where one uses proposals for full
(but possibly incomplete) theories to derive generic low-energy properties.
With such a~view, the framework is empirically testable because it can give rise to potentially observable
phenomena, even if they cannot be predicted with certainty in all their parameterized details.
\looseness=-1 Developments in loop quantum cosmology have not always followed a~general view, especially
since the publication of~\cite{APS}, which stimulated a~line of research focusing with minutest detail on
pure minisuperspace models.
Strenuous contact with the full theory has been replaced by ad-hoc assumptions (for instance related to
degrees of freedom and scaling behaviors\footnote{See Sections~\ref{s:adhoc} and~\ref{s:Param}.} in
discrete structures); ambiguities (such as the so-called area gap and its postulated dynamical role) have
been f\/ixed by hand.
Valuable results have been produced, chief\/ly of mathematical interest, showing what discrete features and
non-standard quantum representations may imply; see e.g.\ \cite{SelfAdFlat,NonSelfAd,PhysEvolBI}.
For physical statements, however, the viewpoint espoused likely contains too many artefacts to be reliable.
This article adds additional items to the list of known minisuperspace limitations.
The main aim, however, is to provide a~general description of homogeneous reduced systems for quantum
cosmology, focusing on but not restricted to loop quantum gravity.
Since quantum cosmological models are beginning to be developed in approaches closely related to loop
quantum gravity, such as spin foams~\cite{SpinFoamRev, SurfaceSum,Rov:Loops}, it is important to state the
general setting of quantum cosmological models, and to point out limitations, dangers, and promises.
The following section begins with a~description and classif\/ication of symmetric models within full
(classical or quantum) theories, amended in later sections by specif\/ic details and discussions within
loop quantum cosmology and, brief\/ly, the spin-foam setting.
The f\/inal section will put these models in the general framework of ef\/fective theory.
Along the road, we will be led to several mathematical features overlooked so far.
Most importantly, Hilbert space representations based on functions on the Bohr compactif\/ication of the
real line do not properly capture all aspects of homogeneous connections; rather, they give rise to
a~degeneracy of two important parameters corresponding, in the full theory, to the edge length and
representation label of holonomies.
The origin of the degeneracy is identif\/ied here as a~mathematical coincidence realized in Abelian models
only.
To solve these problems, we will present a~new non-Abelian construction, work out a~detailed relation to
the full theory, and arrive at a~new viewpoint on dynamical dif\/ference equations with a~natural
implementation of lattice ref\/inement.
\section{Reduction}
\label{s:Reduction}
A classical reduced model, realizing a~given symmetry, uses an embedding ${\cal M}\to{\cal S}$ from the set~${\cal M}$ of symmetric geometries into full superspace~${\cal S}$.
A minisuperspace geometry as an element of ${\cal M}$ is specif\/ied by f\/initely many parameters~$a_I$
(which, to be specif\/ic, one may think of as the three scale factors of a~diagonal Bianchi model), mapped
to a~full metric $g_{ab}(a_I)$ by the classif\/ication of invariant metric tensors.
(A classif\/ication of invariant connections and triads is mathematically more
well-developed~\cite{Brodbeck, KobNom}; see~\cite{CUP, SymmRed} for applications in the present context.)
Inserting $g_{ab}(a_I)$ in equations of motion, the action or constraints of the full theory then produces
corresponding equations for the f\/initely many $a_I$.\footnote{The procedure of inserting symmetric
tensors may not commute with variations used to compute equations of motion~\cite{midisup2, midisup}:
symmetric actions or constraints do not always produce the correct symmetric equations of motion.
In our following discussions we make use of constraints, and therefore must assume that variation and
symmetrization commute.
In the Bianchi classif\/ication of homogeneous models, for instance, we are restricted to class
A~\cite{classAB}.}
We distinguish between a~minisuperspace model (a def\/inition of minisuperspace ${\cal M}$ with a~dynamical
f\/low on it) and the stronger notion of a~reduced model (a minisuperspace model including also an
embedding ${\cal M}\to{\cal S}$, making contact with the full theory).
There is not much of a~dif\/ference classically, but there is a~big one at the quantum level.
In quantum cosmology, most models remain in the minisuperspace setting, def\/ining some dynamical f\/low on
a~system with f\/initely many degrees of freedom such that the dynamics of general relativity follows in
a~semiclassical limit, perhaps with inspiration by but no derivation from some full theory that quantizes
${\cal S}$.
The key ingredient of reductions~-- making contact with a~full theory of quantum gravity, if only in
a~weak sense~-- most often is missing.
This article deals with the problem of reduction at the quantum level, going beyond pure minisuperspace
models.
\subsection{Reduction, selection, projection}
In addition to minisuperspace versus reduced models, it will be useful to distinguish between three
classical procedures of deriving symmetric systems beyond a~mere minisuperspace prescription:
\begin{description}\itemsep=0pt
\item[Reduction:] The def\/inition of a~reduced model, as already stated, contains two parts:
embedding ${\cal M}$ in ${\cal S}$ and deriving a~dynamical f\/low by inserting minisuperspace metrics into
the full equations.
After a~reduced model has been def\/ined, one can proceed to solve its equations and evaluate solutions for
potential predictions\footnote{``Embedding'' might be a~better name for this prescription, but
``reduction'' is much more standard.}.
\item[Selection:] Instead of starting with a~set of minisuperspace geometries and deriving a~reduced f\/low
from the full theory, one could f\/irst solve the full equations and then select symmetric ones that admit
a~set of Killing vector f\/ields with an algebra of the desired symmetry type.
In general relativity with its complicated and largely unknown solution space, this procedure is rather
impractical.
\item[Projection:] Or, again starting with solutions to the full equations, one may derive a~symmetric
geometry for every full solution by some kind of averaging process.
In addition to the problem of a~selection procedure, one would have to face the daunting averaging problem
well-known from cosmology~\cite{AveragingNonPert,Averaging}.
\end{description}
While reduction is a~standard classical procedure, selection or projection cannot be performed by current
means.
In quantum cosmology\footnote{A great danger in quantum cosmology is that its procedures amount to neither
reduction nor selection nor projection, but rather to {\em production}~-- not a~combination of {\em
pro}jection and re{\em duction} as the word might suggest, but just the presentation of an artif\/icial
model of unknown pedigree.}, the best one may attempt is therefore a~quantum version of reduction, going
beyond pure minisuperspace quantizations but not directly accessing the full solution space.
This is the topic of the present article.
\subsection{Quantum cosmology}
Several dif\/f\/iculties arise when one tries to extend classical reductions to quantum theory.
Unlike classically, symmetric solutions can no longer be exact: Inhomogeneous degrees of freedom are set to
zero, both for conf\/iguration and momentum variables.
The spatial metric at any time must be invariant under the given symmetry, and so must its rate of change or
extrinsic curvature for the metric to be able to remain invariant.
Setting non-symmetric modes of both canonical f\/ields to zero violates the uncertainty principle, and
symmetric quantum solutions cannot be exact solutions of the full theory.
Reduced quantum models can at best be approximations, but making sense of the approximation scheme in
a~clear way remains one of the outstanding challenges to be faced.
Mimicking classical constructions, the kinematical structure of quantum gravity can be reduced by making
use of a~mapping $\sigma\colon{\cal H}_{\rm hom}\to {\cal D}_{\rm full}$ from the kinematical Hilbert space
${\cal H}_{\rm hom}$ of a~homogeneous minisuperspace model, quantizing the degrees of freedom $a_I$, to the
space of distributional states in the full theory~\cite{SymmRed}.
This mapping is analogous to the classical ${\cal M}\to {\cal S}$, but the distributional nature of the
target space spells additional complications.
Moreover, with uncertainty relations violated, symmetric quantum evolution is not exact in the full theo\-ry.
Starting with a~homogeneous full (but distributional) state $\psi_{\rm hom}\in\sigma({\cal H}_{\rm
hom})\subset {\cal D}_{\rm full}$, the distributional extension of (the dual action of) the full
constraints $\hat{C}_{\rm full}$ or their gauge f\/lows $\exp(i\delta \hat{C}_{\rm full})$ does not leave
the state in the image of homogeneous states.
In loop quantum cosmology, methods exist to def\/ine and analyze maps $\sigma$,\footnote{More precisely, as
detailed below, instead of ${\cal D}_{\rm full}$ a~distributional space based on lattices, and therefore
fully inhomogeneous but not the most general states, are used.
Otherwise, if edges not adapted to the symmetry appear, there are obstructions to embeddings of
states~\cite{AinvinA}.} but the derivation of a~dynamical f\/low from the full theory remains dif\/f\/icult
even though candidates do exist.
Loop quantum cosmo\-lo\-gy therefore realizes an incomplete quantum reduction.
Wheeler--DeWitt quantum cosmology, on the other hand, is not a~reduction but a~pure minisuperspace
quantization since no analog of $\sigma$ exists.
(There are also models of loop quantum cosmology which do not go beyond pure minisuperspace models,
disregarding proper considerations of $\sigma$.)
In order to restrict or truncate full quantum evolution (or gauge f\/lows) to homogeneous states, one must
specify a~projection of $\hat{C}_{\rm full}\psi_{\rm hom}$ or $\exp(i\delta \hat{C}_{\rm full})\psi_{\rm
hom}$ back to the image of ${\cal H}_{\rm hom}$ in ${\cal D}_{\rm full}$, for all states $\psi_{\rm hom}$
and all full constraints~-- some part of the averaging problem plays a~role even for reduction when
quantum ef\/fects are involved, another indication of more-severe problems.
No such projection has been provided so far, and therefore the dynamics of reduced models, let alone
minisuperspace models, remains incomplete.
The problem is challenging not just owing to quantum issues, such as the distributional nature of symmetric
states.
Even classically, the question of how to project a~non-symmetric metric to a~homogeneous one is
complicated, and unresolved in its generality; it constitutes the averaging problem of cosmology.
Since a~complete derivation of reduced quantum models from the full theory of some form would, in its
semiclassical limit, include a~solution to the averaging problem, one cannot expect progress on the
dynamical front of quantum reduction unless the classical averaging problem is better understood.
The averaging problem remaining open, the only way at present to go beyond minisuperspace models is to use
properties of a~homogeneous background for an implementation of inhomoge\-nei\-ty, perhaps by perturbation
theory.
In classical cosmology, one commonly makes use of this perspective when inhomogeneous f\/ields are expanded
by Fourier transformation with respect to the modes on a~homogeneous background.
Classically, the approximation is well-understood.
In quantum cosmology, the procedure suf\/fers from the same problems encountered for homogeneous models,
and adds new ones related to the quantization of inhomogeneous modes.
Also the question whether results may depend sensitively on the background (and often gauge) chosen before
quantization remains thorny, related to the complicated anomaly issue of quantum gravity.
(Some anomaly-free realizations with partial quantum ef\/fects
exist~\cite{ConstraintAlgebra,LTBII,ScalarHol,TwoPlusOneDef,ModCollapse,ThreeDeform, JR}.
The anomaly problem is to be faced in canonical and covariant approaches alike: in canonical approaches it
appears in the constraint algebra; in covariant ones, in the path-integral measure or in the correct choice
of face amplitudes of spin foams~\cite{Anomaly}.)
Facing these dif\/f\/iculties, it is an ef\/fective viewpoint which allows progress, making use of
suf\/f\/iciently general parameterizations of quantum ef\/fects, but disregarding f\/ine details.
The viewpoint is half-way between minisuperspace models and a~complete dynamical embedding in the full
theory: One avoids the averaging problem by using inhomogeneous model states adapted to the symmetry, much
like classical Fourier modes on a~given background.
In practice, it is often lattice states with links along the symmetry generators that allow one to include
a~discrete version of inhomogeneity at the quantum level~\cite{InhomLattice}.
A background structure is then built into the framework, but it becomes possible to deal with
inhomogeneity, going beyond pure minisuperspace models and escaping their limitations and artefacts.
A background or some gauge f\/ixing has entered, possibly giving rise to new problems.
But at this stage, ef\/fects suf\/f\/iciently general and parameterized can give reliable access to the
physics involved.
In loop quantum cosmology, this procedure has been developed to the degree that cosmo\-lo\-gi\-cal phenomenology
can be done.
The kinematical structure~-- the basic algebra of holonomy and f\/lux operators~-- can be derived from
the full one.
Evolution and the dynamics, facing the classical averaging problem and the anomaly problem of quantum
gravity, remain much less understood, but here parameterizations have been developed that capture
interesting ef\/fects.
Especially the interplay of various quantum corrections, signif\/icant in dif\/ferent regimes of curvature,
puts restrictions on possible phenomena.
We will now go back to the basics of these constructions to clarify and generalize several mathematical
objects involved.
We will point out one major problem due to oft-used Abeliani\-za\-tions of cosmological models, overlooked so
far.
Its solution has several ramif\/ications even at the level of formulating the dynamics.
\section{Loop quantum cosmology}
\label{s:Mini}
Loop quantum gravity is of the type of a~quantized co-tangent bundle of the space of connections, with
additional constraints that restrict solutions to covariant dynamics.
One usually follows Dirac's quantization procedure, in which one f\/irst f\/inds a~representation of
connection components and their conjugate momenta as operators on a~suitable state space, and then requires
physical states to be invariant under the f\/low generated by constraint operators.
The same separation of kinematics (representing the connection and its momentum) and dynamics (implementing
the constraints) appears in homogeneous models.
In order to formulate the relevant expressions, the abstract index notation is useful and common.
The local connection 1-form is then denoted by $A_a^i$ in component form, while its momentum is $E^a_i$.
Indices $a,b,c,\ldots$ refer to the tangent space of the base manifold, while indices $i,j,k,\ldots$ refer
to the Lie algebra of the structure group, in this context SU(2).
(Geometrically, the momentum plays the role of a~densitized triad, or an orthonormal frame whose components
are multiplied with the absolute value of its determinant.) The positions of indices indicate the dual
nature of the f\/ields (such as 1-forms dual to vector f\/ields), and in products with pairs of mutually
dual indices contraction, or the summation over all values of the paired indices, is understood.
For instance, the densitized triad is related to the inverse spatial metric $q^{ab}$ by $E^a_i E^{bi}=
q^{ab} \det q$.
As used here, indices $i,j,k,\ldots$ are raised or lowered by contraction with the Killing metric of the
structure group (or an application of its associated musical morphism).
Indices $a,b,c,\ldots$ could be raised or lowered with the spatial metric $q_{ab}$, but since it depends on
the $E^a_i$, the f\/ield to be quantized, such operations are usually written explicitly.
In order to quantize the theory, one represents the basic geometric f\/ields, connections $A_a^i$ and
densitized triads $E^a_i$, by integrated versions: holonomies (or parallel transports) of the connection
and f\/luxes of the densitized triad.
(A f\/lux is a~surface integration of $E^a_i$, which is well-def\/ined if one uses the Hodge-type duality
of $E^a_i$ to an su(2)-valued 2-form $\epsilon_{abc}E^a_i$.) In homogeneous models, one tries to f\/ind
versions of these quantities that respect some transitive symmetry acting on space.
Instead of all curves for parallel transports and all surfaces for f\/luxes, one is then led to
a~restricted set.
As a~f\/irst step toward symmetry reduction, mathematical theorems are available to classify and construct
dif\/ferent types of connections invariant under the action of some symmetry group~\cite{KobNom}.
When specialized to homogeneity~\cite{cosmoI}, or a~transitive group action on space, a~set of models
equivalent to the usual Bianchi classif\/ication results: For every Bianchi type, there is a~set of three
left-invariant 1-forms $\omega_a^I$, $I=1,2,3$, obtained as standard invariant 1-forms on the corresponding
transitive symmetry group, for instance by expanding the Maurer--Cartan form $\omega_{\rm MC}=\omega^IT_I$
in terms of Lie-algebra generators $T_I$.
These 1-forms serve as a~basis of the space of invariant connections: All invariant connections are $A_a^i=
\tilde{c}^i_I \omega^I_a$ with spatial constants (but time-dependent) $\tilde{c}^i_I$.
Invariant densitized triads take the dual form, $E^a_i= \tilde{p}^{I}_i X_I^a |\det \omega_a^I|$ with
invariant vector f\/ields $X_I^a$ dual to $\omega_a^I$ (that is, $X_I^a\omega_a^J=\delta_I^J$).
This choice of densitized-triad components ensures that $(8\pi\gamma G)^{-1}\int_{\cal V}
\dot{A}_a^iE^a_i{\rm d}^3x= (8\pi\gamma G)^{-1}V_0\dot{\tilde{c}}^i_I \tilde{p}^I_i$, integrating the
symplectic term of an action over some region ${\cal V}$ of coordinate volume $V_0=\int_{\cal V}{\rm d}^3x$.
Up to constant factors, $\tilde{c}^i_I$ and $\tilde{p}^I_i$ are therefore canonically conjugate:
\begin{gather}
\label{Poisson}
\big\{\tilde{c}^i_I,\tilde{p}^J_j\big\}=\frac{8\pi\gamma G}{V_0}\delta^i_j\delta_I^J.
\end{gather}
A Bianchi metric of the given type is $q_{ab}=\big|\det\big(\tilde{p}^K_k\big)\big|
\tilde{p}^i_I\tilde{p}^i_J\omega_a^I\omega_b^J$ with inverse matrices $\tilde{p}^i_I$ of $\tilde{p}^I_i$.
The metric is invariant under rotations $\tilde{p}^I_i\mapsto R_i^j \tilde{p}^I_j$ with $R\in{\rm SO}(3)$,
generated as gauge transformations by the Gauss constraint $\epsilon_{ij}{}^k \tilde{c}^j_I\tilde{p}^I_k$.
The dif\/feomorphism constraint is not relevant for homogeneous models, and the Hamiltonian constraint
is~\cite{cosmoIII}
\begin{gather}
\nonumber
H=-\frac{1}{8\pi G\sqrt{\big|\det\big(\tilde{p}_i^I\big)\big|}}\Bigl(\epsilon_{ijk}C^K_{IJ}\tilde{c}^i_K\tilde{p}
^I_j\tilde{p}^J_k-\tilde{c}^j_I\tilde{c}^k_J\tilde{p}^I_j\tilde{p}^J_k+\tilde{c}^k_I\tilde{c}^j_J\tilde{p}
^I_j\tilde{p}^J_k
\\
\phantom{H=}
+2\big(1+\gamma^{-2}\big)\big(\tilde{c}^j_I-\Gamma^j_I\big)\big(\tilde{c}^k_J-\Gamma^k_J\big)\tilde{p}_j^{[I}\tilde{p}
_k^{J]}\Bigr)
\label{HamHom}
\end{gather}
with the structure constants $C^K_{IJ}$ of the Bianchi group and the spin connection $\Gamma^i_I$,
depending on~$C^K_{IJ}$ and $\tilde{p}^I_i$.
In diagonal models, where $\tilde{c}^i_I=\tilde{c}_{(I)}\delta_I^i$ and
$\tilde{p}_i^I=\tilde{p}^{(I)}\delta^I_i$ (no summation over~$I$), \eqref{HamHom}~reads~\cite{HomCosmo}
\begin{gather}
H=\frac{1}{8\pi G}\Big(\left((c_2\Gamma_3+c_3\Gamma_2-\Gamma_2\Gamma_3)\big(1+\gamma^{-2}
\big)-n^1c_1-\gamma^{-2}c_2c_3\right)\sqrt{|p^2p^3/p^1|}
\nonumber
\\
\phantom{H=} {}+\left((c_1\Gamma_3+c_3\Gamma_1-\Gamma_1\Gamma_3)\big(1+\gamma^{-2}\big)-n^2c_2-\gamma^{-2}
c_1c_3\right)\sqrt{|p^1p^3/p^2|}
\nonumber
\\
\phantom{H=}{}+\left((c_1\Gamma_2+c_2\Gamma_1-\Gamma_1\Gamma_2)\big(1+\gamma^{-2}\big)-n^3c_3-\gamma^{-2}
c_1c_2\right)\sqrt{|p^1p^2/p^3|}\Big)
\label{H}
\end{gather}
with spin-connection components
\begin{gather*}
\Gamma_I=\frac{1}{2}\left(\frac{p^K}{p^J}n^J+\frac{p^J}{p^K}n^K-\frac{p^Jp^K}{(p^I)^2}n^I\right)
\qquad
\text{for}
\qquad
\epsilon_{IJK}=1
\end{gather*}
and coef\/f\/icients $n^I$ of the Bianchi classif\/ication.
For Bianchi I with $C^K_{IJ}=0$, the constraint reduces to
\begin{gather*}
H=-\frac{1}{8\pi\gamma^2G}\frac{\tilde{c}^j_I\tilde{c}^k_J\tilde{p}^I_j\tilde{p}^J_k-\tilde{c}^k_I\tilde{c}
^j_J\tilde{p}^I_j\tilde{p}^J_k}{\sqrt{\big|\det\big(\tilde{p}_i^I\big)\big|}}.
\end{gather*}
In a~spatially f\/lat isotropic model with $\tilde{c}_I^i=\tilde{c}\delta_I^i$ and
$\tilde{p}^I_i=\tilde{p}\delta^I_i$ with $\tilde{c}=\gamma \dot{a}$ and $|\tilde{p}|= a^2/4$~--
see~\cite{cosmoI,LivRev} for the origin of the factor $1/4$~-- this expression reduces correctly to the
gravitational contribution $H=-3\big(4\pi\gamma^2 G\big)^{-1} \tilde{c}^2 \sqrt{|\tilde{p}|}$ of the Friedmann
equation.
\subsection{Abelian artefacts}
\label{s:arte}
\looseness=-1 One of the f\/irst steps of loop quantization consists in replacing connection components
$\tilde{c}_I^i$ with holonomies or exponentials $\exp\big(\tilde{c}_I^i\tau_i\big)\in {\rm SU}(2)$.
The vast majority of investigations in homogeneous loop quantum cosmology, however, deals with Abelian
models in which the original~SU(2) is replaced by~U(1), thanks either to additional isotropy
symmetries~\cite{IsoCosmo} or a~diagonalization assumption~\cite{HomCosmo}.
With isotropy or diagonalization, a~classical invariant connection automatically becomes Abelian, and the
use of U(1) is not ad hoc but required.
However, in quantum cosmology, Abelian structures turn out to allow specif\/ic choices of Hilbert space
representations not possible in non-Abelian ones.
Such quantizations, based essentially on spaces of functions on the Bohr compactif\/ication of the real
line, are therefore in danger of introducing additional artefacts~-- structural properties that cannot be
met in non-Abelian models, let alone the full theory.
For instance, an isotropic connection has the form $A_a^i=\tilde{c}\delta_a^i$ with just one phase-space
component $\tilde{c}$~\cite{IsoCosmo}.
Mimicking matrix elements of holonomies along straight lines such as the edges of the integration cube
${\cal V}$ (or some other f\/ixed set of edges), one f\/irst represents $\tilde{c}$ by U(1)-holonomies
$h=\exp\big(iV_0^{1/3}\tilde{c}\big)$, or $h^n=\exp(inc)$ with an integer U(1)-representation label~$n$ and
$c:=V_0^{1/3}\tilde{c}$.
By spanning a~function space with superpositions of~$h^n$ for all integer~$n$, all continuous functions on
the group~U(1) are realized.
From U(1)-holonomies $h^n$ one can reconstruct the connection component $c$ only modulo $2\pi$.
One gains full control over the connection if one considers holonomies along all pieces of the edges of
the integration cube of lengths $\ell_0\leq V_0^{1/3}$, such that holonomies $h^{\mu}=\exp(i\mu c)$ with
\mbox{$\mu\in{\mathbb R}$} result, where $\mu$ may be considered as a~product~$\lambda n$ of the fractional edge
length $\lambda= \ell_0/V_0^{1/3}$ with the representation label~$n$.
Allowing for superpositions of all $h^{\mu}$ as an orthonormal basis, the Hilbert space of all integrable
functions on the Bohr compactif\/ication $\overline{{\mathbb R}}_{\rm Bohr}$ of the real line is
obtained~\cite{Bohr}, rather than a~function space on some periodif\/ication of~${\mathbb R}$.
In this procedure, which has become standard, one implicitly makes use of an identity realized for
representations of $\overline{{\mathbb R}}_{\rm Bohr}$ but lacking a~non-Abelian analog.
In the Abelian case, we start with the U(1)-holonomy $\exp(i\lambda c)$ and evaluate it in the
$\rho_n$-representation: $\rho_n(\exp(i\lambda c))=\exp(i\lambda nc)$.
It so happens that this is the same function of $c$ as obtained from $\rho_{\lambda n}(\exp(ic))$, now
using a~representation of $\overline{{\mathbb R}}_{\rm Bohr}$.
Holonomies in the $n$-representation have led us to the f\/irst expression, which is then identif\/ied with
the latter.
Since they agree as functions, one may base Hilbert space constructions on functions on $\overline{{\mathbb
R}}_{\rm Bohr}$.
However, this step is not available for non-Abelian models, in which case there is no relationship between
$\rho_j(\exp(\lambda A))$ and $\rho_{\lambda j}(\exp(A))$ for $A$ in the Lie algebra of the group, usually
SU(2).
The second expression is not even def\/ined unless $\lambda$ is an integer (or a~half-integer if $j$ is
integer), but even then, the two matrices are unrelated.
\looseness=-1
If functions on $\overline{{\mathbb R}}_{\rm Bohr}$ are used, one must proceed with care to avoid artefacts
in Abelian mo\-dels, a~problem which has not been realized in existing constructions.
Mathematically, one would confuse $\rho_n(\exp(i\lambda c))$ with $\rho_{\lambda n}(\exp(ic))$, which are
identical as functions of $c$ but have dif\/ferent meanings and are elements of dif\/ferent function spaces.
Physically, merging $\lambda$ and $n$ to one number $\mu=\lambda n$, as done when the Bohr
compactif\/ication is used, eliminates important information because the edge length $\lambda$ and
representation label (or geometrical excitation level) $n$ are then indistinguishable.
In operators, however, $\lambda$ and $n$ should play rather dif\/ferent roles according to what is known
from the full theory: Full holonomy operators act on (spin-network) states labeled by graphs with
representations assigned to their edges.
In the connection representation, such a~state can be written as a~function on the space of connections,
obtained by multiplying matrix elements of all parallel transports along the edges, taken in the respective
representations.
A single holonomy operator adds its curve as a~new edge to the graph if it had not been present before, and
it changes the representation if its curve overlaps with one of the original edges.
Since $\lambda$ in the reduction corresponds to the edge length, operators with dif\/ferent~$\lambda$
should change the underlying graph in dif\/ferent ways, while operators with dif\/ferent~$n$ but the same~$\lambda$ change the graph in the same way but modify the labels dif\/ferently.
These dif\/ferent types of actions cannot be modeled faithfully if reduced operators depend only on the
product~$\lambda n$.
In this section, we present a~new quantization of homogeneous models in which~$\lambda$ and $n$ are kept
separate~-- lifting their degeneracy and assigning to them distinct roles~-- in a~way that extends to
non-Abelian models.
\newpage
\subsection{Homogeneous holonomies}
A local homogeneous connection $A_{\rm hom}$ is a~1-form on the translational symmetry group $S$ underlying
some Bianchi model, taking values in the Lie algebra ${\cal L}G$ of the structure group $G$, $G={\rm
SU}(2)$ for gravity in Ashtekar--Barbero variables.
If the symmetry group acts freely, without any isotropy subgroups, there is a~one-to-one
correspondence~\cite{KobNom} between homogeneous connections according to $S$ and linear maps
$\tilde{\phi}\colon {\cal L}S\to {\cal L}G$ (or elements of ${\cal L}S^*\times{\cal L}G$), not required to
be Lie algebra homomorphisms.
Given $\tilde{\phi}$, the corresponding homogeneous connection is the pull-back $A_{\rm hom}=
\tilde{\phi}^* \omega_{\rm MC}$ under $\tilde{\phi}$ of the Maurer--Cartan form, which can be
written as $\omega_{\rm MC}=\omega^IT_I$ in terms of left-invariant 1-forms $\omega^I=\omega_a^I{\rm d}x^a$
on $S$ and its generators $T_I$.
The homogeneous connection components introduced before are the coef\/f\/icients in $\tilde{\phi}(T_I)=
\tilde{c}_I^i \tau_i$ with generators $\tau_i$ of ${\cal L}G$ (here, $\tau_j=-\frac{1}{2}i\sigma_j$ in
terms of Pauli matrices).
A minisuperspace model quantizes the components $\tilde{c}_I^i$, or rather the linear maps
$\tilde{\phi}$.
Both ingredients are in one-to-one correspondence, but the additional structure shown by the linear maps is
useful to decide how dif\/ferent quantum numbers, such as $\lambda$ and $n$ in Section~\ref{s:arte},
should be related to properties of~$S$ (space) and~$G$ (internal space).
We will therefore derive a~quantization based on the mathematical structure of $\tilde{\phi}$.
To extract independent degrees of freedom, we f\/ix a~set of generators $T_I$ of ${\cal L}S$ and understand
a~homogeneous $G$-connection for a~given symmetry group $S$ as a~set of maps $\tilde{\phi}_I\colon \langle
T_I\rangle \to {\cal L}G$ with the scaling condition $\tilde{\phi}_I(rX)= r\tilde{\phi}_I(X)$ for all
$r\in{\mathbb R}$.
Following the methods of loop quantum gravity, we quantize connection components in terms of holonomies.
According to the structure of homogeneous connections, we introduce the notion of homogeneous holonomies by
exponentiation~-- maps $h_{\phi}\colon {\cal L}S\to G, h_{\phi}(X)=\exp(\phi(X))$ with the scaling
condition $h_{\phi}(rX)=h_{r\phi}(X)$.
The maps $\phi_I=L_I\tilde{\phi}_I$ used here dif\/fer from $\tilde{\phi}_I$ by factors of~$L_I$, side
lengths of the integration region ${\cal V}$ of volume $V_0=L_1L_2L_3$, assumed cubic (spanned by the three
generators $T_I\cong X_I^a\partial/\partial x^a$ of the $S$-action).
If the sides of ${\cal V}$ are aligned with the three symmetry generators, one may think of $h_{\phi}(T_I)$
as the holonomy along the corresponding side.
This relationship will be made more precise below.
Elements $X\in{\cal L}S$ to which $\phi$ is applied carry information about the edge used to compute
holonomies $h_{\phi}(X)$.
Referring to the Killing metric on ${\cal L}S$, we decompose $X=\lambda v\not=0$ into its norm
$\lambda=|X|$ and the unit vector $v=X/|X|$, corresponding to the coordinate length of the edge and its
direction.
With the scaling condition, we then have $h_{\phi}(X)= \exp(\lambda \phi(v))$.
We can compute all information about $\phi$ from derivatives $\phi(T_I)= {\rm d} h_{\phi}(\lambda T_I)/{\rm
d}\lambda|_{\lambda=0}$.
We are indeed representing the space of all homogeneous connections, not some periodic identif\/ication.
The dependence of homogeneous holonomies on $\lambda=|X|$ will play an important role in our constructions.
If we consider an edge $e_I$ of coordinate length $\ell_I$ along the generator $X_I^a$, the holonomy, in
general a~path-ordered exponential $h_e={\cal P}\exp(\int_e{\rm d}s A_a^i\tau_i\dot{e}^a)$ of the
connection integrated over a~spatial curve $e(s)$, is $h_{e_I}= \exp(\ell_I \tilde{c}^i_I
\tau_i)=\exp(\ell_IL_I^{-1} \phi(T_I))=h_{\phi}(\lambda_IT_I)$ with $\lambda_I=\ell_I/L_I$.
If all edges are contained in the integration region, we always have $\lambda_I\leq 1$.
More generally, we can allow all real values, but for SU(2), given periodicity of the exponential function,
may restrict to $0\leq\lambda_I<4\pi$ without loss of generality.
For a~given connection, the three choices for $I$, or three directions of space, give rise to three
independent SU(2)-elements $h_{\phi}(T_I)$.
For f\/ixed $\lambda_I$, one can therefore describe the space of homogeneous connections in terms of ${\rm
SU}(2)^3$,\footnote{Thanks to homogeneity, each holonomy transforms as $h_{e_I}\mapsto g h_{e_I}g^{-1}$
under an internal gauge transformation, with the same $g\in{\rm SU}(2)$ for all three edges and on all
their endpoints.
These transformations are identical to those obtained in the full theory for three closed loops
intersecting in one 6-valent vertex~\cite{cosmoI}.
One may picture homogeneous spin-network states as such vertices, but with homogeneity, the vertex
corresponds to all of space~-- homogeneous states are distributional and not given by single spin
networks; see Section~\ref{s:dist}.} but the connection used can be reconstructed completely from
the holonomies only if dif\/ferent choices for $\lambda_I$, or curves of dif\/ferent lengths, are
considered.
If the curves and their lengths are f\/ixed, as in the original constructions
of \cite{cosmoI,IsoCosmo,HomCosmo}, only a~certain periodic identif\/ication of the space of connections is
realized.
In order to generalize homogeneous holonomies $h_{\phi}(T_I)$, we consider a~set ${\cal F}$ of functions
$g_{I}\colon {\mathbb R}\times{\cal L}G\to G$ that fulf\/ill $g_I(rL_I,r^{-1}\tilde{c}_I)=
g_I(L_I,\tilde{c}_I)$ for all $r\in{\mathbb R}$ and $I=1,2,3$ (the scaling condition).
In this way, we can drop the reference to particular edges as appropriate for a~minisuperspace model, but,
as demonstrated in what follows, will still be able to distinguish a~length parameter from a~spin label.
The choice $r=1/L_I$ shows that any such function can be written as $g_I(L_I,\tilde{c}_I)=
\tilde{g}_I(L_I\tilde{c}_I)$ with a~function $\tilde{g}_I$ of just one variable $A\in{\cal L}G$.
If $L_I$ and $r$ are f\/ixed, $g_I$ is simply the group exponential; setting $r$ free allows for
dif\/ferent scalings or dif\/ferent sizes $L_I$ of the integration region within one model.
\subsection{Representation}
\label{s:Rep}
For homogeneous models of loop quantum cosmology, we turn the function space based on holonomies into
a~Hilbert space with an action of holonomies and f\/luxes as basic operators, such that their commutator
corresponds to the classical Poisson bracket~\eqref{Poisson}.
One immediate problem caused by the non-Abelian nature of general connections in combination with
homogeneity regards the way of exponentiating connection components to holonomies and obtaining a~closed
basic algebra for $\{\exp(\lambda_Ic_I^i\tau_i),p^J_j\}$.
Once the path-ordering of inhomogeneous holonomies is no longer available, derivatives of
$\exp(\lambda_Ic_I^i\tau_i)$ by $c_J^j$ will produce extra factors of $\tau_j$ between products of
$c_I^i\tau_i$ in a~power-series expansion of the matrix exponential:
\begin{gather}
\label{Exp}
\frac{\partial\exp(\lambda_Ic_I^i\tau_i)}{\partial c_J^j}=\delta_I^J\sum_{n=0}^{\infty}\frac{\lambda_I^n}
{n!}\sum_{k=1}^n\big(c_I^i\tau_i\big)^{k-1}\tau_j\big(c_I^i\tau_i\big)^{n-k},
\end{gather}
but they do not automatically factor into products of exponentials with $\tau_j$ to mimic the full
holonomy-f\/lux algebra.
(While the cotangent bundle $T^*G$ def\/ines a~natural phase space with group-valued conf\/iguration
variables, it does not necessarily model the correct relation to inhomogeneous holonomies.)
For a~closed basic algebra to result, the factors in derivations of basic holonomy-like functions of
$c_I^i$ may have to be re-ordered, but within a~pure minisuperspace model, there is no guideline, no trace
of the path-ordering left by which one could construct a~natural ordering.
By looking more closely at the relation between basic operators in models and the full theory (or at least
extended curves in holonomies), we will be led to one distinguished choice.
\subsubsection{Hilbert space}
We f\/irst construct a~suitable $C^*$-algebra ${\cal A}$ of functions on homogeneous connections, making
use of our generalized homogeneous holonomies $g_I$: We consider a~function $\psi$ on the space of
homogeneous connections as a~function on the domain of def\/inition ${\mathbb R}\times{\cal L}G$ of $g_I$
which factorizes through $g_I$, that is a~function $\psi(L_I,\tilde{c}_I)$ which can be written as
$\bar{\psi}(g_I(L_I,\tilde{c}_I))$ with a~function $\bar{\psi}$ on $G$.
The scaling condition for $g_I$ then translates into an analogous condition for $\psi$.
\looseness=1
If we f\/ix $L_I$, considering $g_I(L_I,\tilde{c}_I)$ simply as an element of $G$, and refer to the
Peter--Weyl theorem, the general dependence on $\tilde{c}_I$ can be realized by superpositions of functions
$\langle m|\rho_j(g_I(L_I,\tilde{c}_I))| n\rangle$ with all irreducible representations $\rho_j$ of $G$ and
elements $|m\rangle$ and $|n\rangle$ of an orthonormal basis of the representation space of $\rho_j$.
Setting $L_I$ free, with $g_I(\cdot,\tilde{c}_I)$ as a~1-parameter family of $G$-elements, a~larger class
of functions is possible.
However, the scaling condition can be realized only if our functions are superpositions of
$\rho_{\lambda,j}(g_I)^m_n:= \langle m|\rho_j(g_I(\lambda L_I,\tilde{c}_I))|n\rangle$ for
$\lambda\in{\mathbb Q}$.
(The restriction to rational as opposed to real $\lambda$ will be motivated later on.
The labels $\lambda$, $j$, $m$ and $n$ may depend on $I$, but we will often suppress the dependence for
notational simplicity.) Note that, perhaps in a~slight abuse of notation, $\rho_{\lambda,j}$ is not
a~representation of the group ${\mathbb R}\times G$.
It does, however, provide elements of a~suitable function space, whose elements are labeled by~$\lambda$,~$j$,~$m$, and~$n$ and on which we can represent holonomies as constructed in what follows.
(The ${\mathbb R}$-factor is related to curve lengths and not part of the structure group, and therefore
should not be expected to be represented in the standard way of gauge theories.)
We multiply two functions $\rho_{\lambda_1,j_1}(g_I)^{m_1}_{n_1}$ and
$\rho_{\lambda_2,j_2}(g_J)^{m_2}_{n_2}$ as follows: If $I\not=J$, we simply take the product function
depending on $g_I$ and $g_J$ as independent variables, thereby generating a~tensor-product space.
If $I=J$, we write $\lambda_1=N_1z$ and $\lambda_2=N_2z$, with integers~$N_1$ and~$N_2$ and~$z$ the largest
rational number that obeys the two relationships (so that~$N_1$ and~$N_2$ are relatively prime), and
def\/ine
\begin{gather}
\rho_{\lambda_1,j_1}(g_I)^{m_1}_{n_1}\!\cdot\!\rho_{\lambda_2,j_2}(g_I)^{m_2}_{n_2}
:=\sum_{h_1,\ldots,h_{N_1-1},k_1,\ldots,k_{N_2-1}}\rho_{z,j_1}(g_I)^{m_1}_{h_1}\rho_{z,j_1}(g_I)^{h_1}
_{h_2}\cdots\rho_{z,j_1}(g_I)^{h_{N_1-1}}_{n_1}
\nonumber
\\
\phantom{\rho_{\lambda_1,j_1}(g_I)^{m_1}_{n_1}\!\cdot\!\rho_{\lambda_2,j_2}(g_I)^{m_2}_{n_2}:=}
\times\rho_{z,j_2}(g_I)^{m_2}_{k_1}\rho_{z,j_2}(g_I)^{k_1}_{k_2}\cdots\rho_{z,j_2}(g_I)^{k_{N_2-1}}_{n_2}.
\label{Mult}
\end{gather}
One may decompose the products of matrix elements on the right-hand side into superpositions of irreducible
contributions to the tensor product $\rho_{j_1}^{\otimes N_1}\otimes \rho_{j_2}^{\otimes N_2}$, akin to
a~spin-network decomposition in the full theory~\cite{RS:spinnet}.
The product is then again a~superposition of $\rho_{\lambda,j}(g_I)^m_n$.
Multiplication as def\/ined is commutative and associative because these properties are respected by the
conditions def\/ining $z$.
(For associativity, the condition that $z$ be maximal is crucial.
There is then a~unique $z$ so that $\lambda_i=N_iz$ for any given number $n$ of $\lambda_i$,
$i=1,\ldots,n$.) There is a~unit element given by $\lambda=0$ (in which case the value of $j$ does not
matter).
For va\-nishing $j$, having the trivial representation of $G$, one may expect a~trivial action, too.
However, according to~\eqref{Mult}, multiplication with $\rho_{\lambda,0}(g_I)$ for $\lambda\not=0$ may
still give rise to decompositions of factors, providing a~dif\/ferent form of the function product even
though the values taken by the original function and its product with $\rho_{\lambda,0}(g_I)$ do not
dif\/fer.
The functions $\rho_{\lambda,0}(g_I)$ (mat\-rix indices are not required in the trivial representation) play
the role of ref\/inement operators, decomposing a~holonomy into pieces whose length is determined by
$\lambda$ in relation to the corresponding parameter of the state acted on.
Since $\rho_0(h_{\phi}(\lambda T_I))=1$ classically, these ref\/inement operators have no classical analog,
as one may expect for a~classical theory knowing nothing about the underlying discreteness.
We def\/ine a~star operation by $(\rho_{\lambda,j}(g_I)^m_n)^*:= \overline{\rho_{\lambda,j}(g_I)^m_n}$
(related to matrix elements of the dual representation $\rho_j^*$ of $\rho_j$ for unitary groups).
The space of functions turns into an Abelian $C^*$-algebra with the supremum norm, assuming $G$ to be
compact.
The supremum is obtained by evaluating $\rho_{\lambda,j}(g_I)^m_n$ on ${\cal L}G$.
(Thanks to the scaling condition, the supremum does not depend on $\lambda$.) The $C^*$-identity
$||\rho^*\cdot \rho||= ||\rho^*||\: ||\rho||$ then follows as it does for the standard example of functions
on $G$.
A Hilbert space structure is obtained by combining the product rule with the Haar measure on $G$.
We def\/ine the inner product
\begin{gather}
\label{InnProd}
\big(\rho_{\lambda_1,j_1}(g_I)^{m_1}_{n_1},\rho_{\lambda_2,j_2}(g_J)^{m_2}_{n_2}\big):=\prod_K\int_G{\rm d}
\mu_{\rm H}(g_K(z L_K,\tilde{c}_K))(\rho_{\lambda_1,j_1}(g_I)^{m_1}_{n_1})^*\cdot\rho_{\lambda_2,j_2}
(g_J)^{m_2}_{n_2},\!\!\!
\end{gather}
where $z$ is def\/ined as before for given $\lambda_1$ and $\lambda_2$.
Equation~\eqref{InnProd} is just the standard inner product on $G$ if we realize, using~\eqref{Mult}, that
the relevant degree of freedom of $(\rho_{\lambda_1,j_1}(g_I)^{m_1}_{n_1})^* \cdot
\rho_{\lambda_2,j_2}(g_J)^{m_2}_{n_2}$ is $g_K(zL_K,\tilde{c}_K)$.
On the right-hand side of this equation, the value of $z$ no longer matters because we integrate over all
group elements $g_K(zL_K,\tilde{c}_K)$, the sole arguments of $(\rho_{\lambda_1,j_1}(g_I)^{m_1}_{n_1})^*
\cdot \rho_{\lambda_2,j_2}(g_J)^{m_2}_{n_2}$.
\subsubsection{Flux operators}
\label{s:Flux}
Components $p^I_i=L_JL_K\tilde{p}^I_i$ ($\epsilon_{IJK}=1$) of the densitized triad, canonically conjugate
to $c_I^i=L_I\tilde{c}_I^i$ via $\{c_I^i,p^J_j\}= 8\pi\gamma G \delta_I^J\delta^i_j$, are quantized to
operators with action
\begin{gather}
\label{Deriv}
\hat{p}^I_i\rho_{\lambda,j}(g_J)^m_n:=-8\pi i\gamma\ell_{\rm P}^2\lambda\delta^I_J\rho_{\lambda,j}
(\tau_i g_J)^m_n
\end{gather}
on our Hilbert space, where we def\/ine the short form $\rho_{\lambda,j}(\tau_i g_J)^m_n:= \sum_k
\rho_j(\tau_i)^m_k \rho_{\lambda,j}(g_J)^k_n$.
Non-Abelian f\/lux operators as def\/ined are {\em not} symmetric because the product~\eqref{Mult}
in~\eqref{InnProd} in general includes a~decomposition.
(One may think of the measure factor as including ref\/inement operators $\rho_{z,0}(g_K)$.) While a~f\/lux
operator acting on either entry in the inner product inserts a~$\tau_i$ to the left of $g_J$ according
to~\eqref{Deriv}, the integration required to evaluate the inner product splits holonomies according to the rational number $z$ depending on $\lambda_1$ and $\lambda_2$.
Integration by parts, performed after multiplying according to~\eqref{Mult} and decomposing, would then
insert $\tau_i$ in each decomposed contribution, not just to the left of the whole $g_K$.
A convenient set of states in the homogeneous Hilbert space is given by a~form of spin-network functions,
depending on the connection via f\/initely many holonomies,
\begin{gather*}
\psi(g_1,g_2,g_3)=\sum_{\lambda_J^{(k)},j_J^{(k)},m_J^{(k)},n_J^{(k)}}\psi_{\lambda_1^{(k)},\lambda_2^{(k)},
\lambda_3^{(k)},j_1^{(k)},j_2^{(k)},j_3^{(k)},m_1^{(k)},m_2^{(k)},m_3^{(k)},n_1^{(k)},n_2^{(k)},n_3^{(k)}}
\\
\phantom{\psi(g_1,g_2,g_3)=}
\times
\prod_{I=1}^3\rho_{\lambda_I^{(k)},j_I^{(k)}}(g_I)^{m_I^{(k)}}_{n_I^{(k)}}
\end{gather*}
with coef\/f\/icients
$\psi_{\lambda_1^{(k)},\lambda_2^{(k)},\lambda_3^{(k)},j_1^{(k)},j_2^{(k)},j_3^{(k)},m_1^{(k)},
m_2^{(k)},m_3^{(k)},n_1^{(k)},n_2^{(k)},n_3^{(k)}}$ for $k$ in some f\/inite index set.
Acting on the contribution $\rho_{\lambda_I,j_I}(g_I)$, $\hat{J}^I_i=(8\pi\gamma\ell_{\rm
P}^2)^{-1}\lambda_I^{-1}\hat{p}^I_i$ satisf\/ies the ${\cal L}G$-algebra by the def\/inition
in~\eqref{Deriv}.
For SU(2), as used in loop quantum gravity, the f\/lux spectrum is therefore given by all numbers
$8\pi\gamma\ell_{\rm P}^2\lambda m$ with half-integer $m$, and the area spectrum (or the spectrum of
$\sqrt{\hat{p}^I_i\hat{p}^{(I)i}}$) by $8\pi\gamma\ell_{\rm P}^2\lambda \sqrt{j(j+1)}$.
Although these eigenvalues are real, one can see the non-symmetric nature of non-Abelian f\/lux operators:
Eigenstates with dif\/ferent eigenvalues (specif\/ically, states with the same $j$ but dif\/ferent
$\lambda$) are not necessarily orthogonal, again owing to the decomposition in~\eqref{InnProd}.
With all rational $\lambda$ allowed, these spectra form continuous sets, but all eigenstates are
normalizable.
The spectra are pure point.
\subsubsection{Heuristics and properties}
To summarize so far, the non-Abelian nature of connections requires care in the proper ordering of
holonomy-type variables to be used in lieu of connections for a~loop quantization.
We refer to edge-integrated holonomies rather than pointwise exponentials, even in homogeneous models.
But since the connection is still homogeneous, we must specify the product rule~\eqref{Mult}~-- the
f\/irst place in our constructions where dif\/ferent holonomies may be compared~-- so that the correct
reduction of degrees of freedom is realized.
This requirement leads us to the decomposition rule, which then further motivates def\/initions of
compatible inner products and derivative operators, including non-selfadjoint features of the latter.
(Decomposition is not an issue in Abelian models because they obey $\rho_{\lambda,n}(\exp(iLc))=
\rho_n(\exp(iLc/r))^p$ for $\lambda=p/r$ with integer $p$ and $r$, an identity that trivially brings all
holonomies to the same distance $L$.
However, this feature makes use of the Abelian coincidence discussed in Section~\ref{s:arte}.
See below for more on Abelian models.)
The multiplication rule~\eqref{Mult} observes homogeneity: One can interpret the law as a~decomposition of
two initial holonomies of dif\/ferent lengths as products of equal-length pieces.
Without homogeneity, these pieces at dif\/ferent places would be independent, but homogeneity makes them
identical.
We therefore take the tensor product of all small pieces, split as illustrated in Fig.~\ref{Hol}, and sum
over indices according to the product form.
At this stage, it becomes clear why $\lambda$ should be rational: For incommensurate $\lambda_1$ and
$\lambda_2$, the product $\rho_{\lambda_1,j_1}(g_I)^{m_1}_{n_1}\cdot \rho_{\lambda_2,j_2}(g_I)^{m_2}_{n_2}$
would have to be split into inf\/initely many inf\/initesimally small pieces\footnote{Projective-limit
constructions, in which states with a~given denominator $r$ would play the analog of f\/ixed-state
cylindrical states in the full theory, may be used to allow for incommensurate parameters $\lambda$, but we
will not need this in the present article.}.
\begin{figure}[t] \centering
\includegraphics[width=5cm]{Bojowald-Fig1a} \hspace{3cm} \includegraphics[width=2cm]{Bojowald-Fig1b}
\caption{Evaluated in homogeneous connections, dif\/ferent pieces of equal length in one holonomy amount to
the same function (left).
To avoid overcounting degrees of freedom, multiplying a~holonomy with a~shorter one requires
a~decomposition into pieces of maximal common length (right), giving rise to the rule~\eqref{Mult}.}
\label{Hol}
\end{figure}
In this picture, non-symmetric f\/lux operators~\eqref{Deriv} arise because reduction entails averaging and
decomposition, and what may appear as a~simple quantization of the densitized triad in a~pure
minisuperspace model turns out to be a~more complicated operator when inhomogeneous degrees of freedom are
taken into account.
We will see this in more detail in Section~\ref{s:AvOp}.
Even non-symmetric f\/lux operators (quantizing the densitized triad combined with the action of
holonomy-dependent decomposition, realized for instance by $\rho_{\lambda,0}(g_I)$) model the behavior of
the full theory, in which f\/luxes are self-adjoint.
Also the commutator $[\rho_{\lambda,j}(g_J)^m_n,\hat{p}^I_i]$ of basic operators in the non-Abelian
holonomy-f\/lux algebra requires care due to decomposition.
Up to ordering, it equals $8\pi i\gamma \ell_{\rm P}^2 \lambda \delta_J^I \rho_{\lambda,j}(\tau_i g_J)^m_n$
and corresponds to $i\hbar$ times the Poisson bracket of $\rho_j(\exp(\lambda c_J^i\tau_i))^m_n$, the
classical analog of $\rho_{\lambda,j}(g_J)^m_n$, and $p_i^I$.
For an example of ordering issues, as indicated in the beginning of Section~\ref{s:Rep}, look at the
commutator $[\rho_{1/2,j}(g_I)^M_N,\hat{p}^I_i]$ acting on the state $\psi(g_I)=\rho_{1,j}(g_I)^m_n$.
Acting with $\hat{p}^I_i$ f\/irst produces a~single insertion of $\tau_i$ to the left of $g_I$.
Acting with $\hat{p}^I_i$ after the action of $\rho_{1/2,j}(g_I)$ produces two insertions because we
f\/irst decompose $\rho_{1/2,j}(g_I)^{m'}_{n'}\cdot
\rho_{1,j}(g_I)^m_n=\sum_k\rho_{1/2,j}(g_I)^{m'}_{n'}\rho_{1/2,j}(g_I)^m_k \rho_{1/2,j}(g_I)^k_n$
according to~\eqref{Mult} and use the Leibniz rule.
The result,
\begin{gather*}
4\pi i\gamma\ell_{\rm P}^2\rho_{1/2,j}(\tau_i g_I)^M_N\psi(g_I)+4\pi i\gamma\ell_{\rm P}^2\rho_{1/2,j}(g_I)^M_N
\!
\left(\!\sum_k\rho_{1/2,j}(g_I)^m_k\rho_{1/2,j}(\tau_ig_I)^k_n-\rho_{1,j}(\tau_i g_I)^m_n\!\right)
\end{gather*}
is as expected up to the second term, a~contribution that can be made to vanish by reordering.
Similar calculations for arbitrary $\lambda_1$ and $\lambda_2$ in
$[\rho_{\lambda_1,j_1}(g_I)^M_N,\hat{p}^I_i]$ acting on a~state $\psi(g_I)=\rho_{\lambda_2,j_2}(g_I)^m_n$
($J$-contributions with $J\not=I$ do not matter) result in
\begin{gather*}
[\rho_{\lambda,j}(g_I)^M_N,\hat{p}^I_i]=8\pi i\gamma\ell_{\rm P}^2\lambda\rho_{\lambda,j}
(\tau_ig_I)^M_N+8\pi i\gamma\ell_{\rm P}^2\hat{R}_i^I\rho_{\lambda,j}(g_I)^M_N.
\end{gather*}
The specif\/ic ordering introduces an extra contribution from the reordering operator $\hat{R}_i^I$
def\/i\-ned~by
\begin{gather*}
\hat{R}_i^I\bigl(\rho_{\lambda_1,j_1}(g_I)^{m_1}_{n_1}\cdot
\cdots
\cdot\rho_{\lambda_N,j_N}(g_I)^{m_N}
_{n_N}\bigr)=z\sum_{n=1}^N\rho_{\lambda_1,j_1}(g_I) \cdots
\\
\qquad{}
\times\left(\sum_{p=1}^{N_n}\sum_{k_1,\ldots,k_{N_n-1}}\rho_{z,j_n}(g_I)^{m_n}_{k_1}\cdots\rho_{z,j_n}
(\tau_i g_I)^{k_p}_{k_{p+1}}\cdots\rho_{z,j_n}(g_I)^{k_{N_n-1}}_{n_n}\right)
\cdots \rho_{\lambda_N,j_N}(g_I)
\\
\qquad
{}-\sum_{n=1}^N\lambda_n\rho_{\lambda_1,j_1}(g_I)^{m_1}_{n_1} \cdots \rho_{\lambda_n,j_n}
(\tau_ig_I)^{m_n}_{n_n} \cdots \rho_{\lambda_N,j_N}(g_I)^{m_N}_{n_N}
\end{gather*}
with $z$ def\/ined as before but for all $\lambda_n$ involved: $\lambda_n=N_nz$ with integers $N_n$ and
ra\-tio\-nal~$z$ maximal.
Up to ordering, $\hat{R}_i^I(\rho_{\lambda_1,j_1}(g_I)^{m_1}_{n_1}\cdots
\rho_{\lambda_N,j_N}(g_I)^{m_N}_{n_N})$ vanishes.
For more compact notation, one may express the ref\/inement included in the action of $\hat{R}_i^I$ in
terms of $\rho_{\lambda,0}(g_I)$ with suitable $\lambda$.
Therefore, $\hat{R}_i^I$ is not independent of the operators already introduced.
Specif\/ically, we can write $\hat{R}_i^I=-(8\pi i \gamma\ell_{\rm P}^2)^{-1} \hat{p}^I_i
(\rho_{z,0}(g_I)-1)$ where $z$ is determined as before for all $\lambda$-parameters of holonomy factors to
the right of $\rho_{z,0}(g_I)$.
The basic commutator then reads
\begin{gather*}
\big[\rho_{\lambda,j}(g_I)^M_N,\hat{p}^I_i\big]=-\big(\hat{p}^I_i\rho_{\lambda,j}(g_I)^M_N\big)-\hat{p}^I_i(\rho_{z,0}
(g_I)-1)\rho_{\lambda,j}(g_I)^M_N=\widehat{\big\{\rho_j(h_{\phi}(\lambda T_I))^M_N,p^I_i\big\}}.
\end{gather*}
Since $\rho_{\lambda,0}(g_I)=1$ classically, the commutator can indeed play the role of a~quantization of
the Poisson bracket, $\widehat{\big\{\rho_j(h_{\phi}(\lambda T_I))^M_N,p^I_i\big\}}= 8\pi i\gamma\ell_{\rm P}^2
\lambda \rho_{\lambda,j}(\tau_ig_I)^M_N$.
Note that it does not quantize~\eqref{Exp}.
In addition to a~quantum representation, we have to choose an ordering of non-Abelian terms when we
def\/ine~\eqref{Deriv}, corresponding to our realization of connection degrees of freedom.
We do not represent pointwise exponentials, which as argued after~\eqref{Exp} would not give rise to
a~closed algebra, but rather, as we will conf\/irm later, use re-ordered classical exponentials closer to
integrated holonomies.
The basic algebra is more complicated than one may have expected, but we do obtain a~closed algebra of
basic operators with the correct classical limit.
Each operator $\hat{p}_j^I$ can be viewed as a~component of the f\/lux operator for a~surface given by the
full side of a~cubic ${\cal V}$, transversal to the direction $X_I^a$.
These minisuperspace f\/luxes refer just to the artif\/icial integration region ${\cal V}$ and its
coordinate volume $V_0=L_1L_2L_3$, not to actual edge lengths or areas of dual surfaces, for instance in
a~lattice~-- we are still quantizing homogeneous connections.
One can easily rescale the linear f\/lux by using $\lambda_J\lambda_Kp_i^I= \ell_J\ell_K \tilde{p}_i^I$ for
$\epsilon_{IJK}=1$, now corresponding to the f\/lux through a~surface of area related to edge lengths
$\ell_J=\lambda_JL_J$ and $\ell_K=\lambda_KL_K$.
Without a~lattice construction to show how dual surfaces are related to links, however, a~strict
minisuperspace quantization does not provide a~satisfactory, ${\cal V}$-independent def\/inition of
f\/luxes.
We will complete the construction in Section~\ref{s:Aver} when considering the relation to the full
theory.
The construction provided here is certainly more complicated than a~traditional minisuperspace
quantization, but it has several convenient features:
\begin{itemize}\itemsep=0pt
\item Because $\phi$ is required to be a~linear map, we need consider holonomies only along linear
combinations of the generators $T_I$, identif\/ied with tangent vectors along ``straight lines''. No
path-ordering is required to compute these holonomies, and they have simple forms.
(For examples of more complicated holonomies, see~\cite{AinvinA}.) In particular, their matrix elements
span a~non-Abelian version of the space of almost-periodic functions: superpositions of periodic functions
with dif\/ferent periodicities given by $\lambda$.
\item The space of homogeneous holonomies is compact if $G$ is compact, as the spectrum of a~unital Abelian
$C^*$-algebra.
Homogeneous loop quantum cosmology makes use of function spaces on compactif\/ications of the classical
spaces of homogeneous connections, which contain all classical connections as a~dense subset.
\item If $G$ is Abelian, the homogeneous Hilbert space is (non-unitarily) related to a~product of the
Hilbert space ${\cal H}_{\rm Bohr}$ of square-integrable functions on the Bohr compactif\/ication of the
real line, with $\dim(S)\times\dim(G)$ factors.
More details will be given below.
\item If $S$ does not act freely, its isotropy subgroup requires additional identif\/ications of the
components of $\tilde{\phi}_I^i$: Linear maps $\tilde{\phi}$ must then satisfy $\tilde{\phi}\circ {\rm
ad}_f= {\rm ad}_{F(f)}\tilde{\phi}$ for any $f$ in the isotropy subgroup and a~corresponding element
$F(f)\in G$; see~\cite{CUP,SymmRed, Brodbeck} for details.
Accordingly, we restrict homogeneous holonomies by $h_{\phi}({\rm ad}_fX)= h_{{\rm ad}_{F(f)} \circ
\phi}(X)$ in addition to the scaling condition.
These functions, and therefore~$g_I$, take values in a~subgroup of~$G$, the centralizer of the subset of
all~$F(f)$ in~$G$, which is Abelian if the isotropy subgroup is suf\/f\/iciently large\footnote{Formally,
classical Abelianization may also be implemented via second-class
constraints~\cite{LQCGaugeFix2, LQCGaugeFix}.}.
\end{itemize}
\subsubsection{Diagonalization}
Detailed ordering prescriptions make some formulas of representations of non-Abelian models look rather
complicated.
It is more straightforward to describe the space of connections by holonomies when diagonal Bianchi models
are used, implying Abelianization.
In diagonal mo\-dels, the relevant parameters appear by writing a~homogeneous connection as
$A_a^i=\tilde{c}_{(I)} \omega_a^I \Lambda_I^i$ with left-invariant 1-forms $\omega_a^I$ of the given
symmetry type and SO(3)-matrices $\Lambda_I^i$ which can easily be f\/ixed to equal $\delta_I^i$.
(Writing $\tilde{c}_I^i= \tilde{c}_{(I)}\Lambda_I^i$, not summing over $I$, makes use of the polar
decomposition of matrices~\cite{HomCosmo}.) The conjugate f\/ield, the densitized triad, has a~similar
decomposition, $E^a_i= \tilde{p}^{(I)} X_I^a \Lambda^I_i |\det \omega_a^I|$.
All ingredients except $\tilde{c}_I$ and $\tilde{p}^I$ are determined by the symmetry type or gauge
choices, and $\tilde{c}_I$ and $\tilde{p}^I$ are the canonical degrees of freedom.
As before, the symplectic term $(8\pi \gamma G)^{-1}\int_{{\cal V}}{\rm d}^3x
\dot{A}_a^iE^a_i=V_0(8\pi\gamma G)^{-1} \dot{\tilde{c}}_I\tilde{p}^I$, integrated over some bounded region
${\cal V}$ of volume $V_0$, provides Poisson brackets
\begin{gather}
\label{PoissonAbel}
\big\{\tilde{c}_I,\tilde{p}^J\big\}=\frac{8\pi\gamma G}{V_0}\delta^J_I.
\end{gather}
Holonomies $h_{e_I}=\exp(\ell_I\tilde{c}_{(I)}\Lambda_I^i\tau_i)$ of diagonal connections still take values
in SU(2).
However, the relations between $h_{e_I}$ for dif\/ferent $I$ are not arbitrary because their generators
$Y_I:=\ell_I\tilde{c}_{(I)}\Lambda_I^i\tau_i$ satisfy the condition ${\rm tr}(Y_IY_J)=0$ for $I\not=J$.
(Dif\/ferent rows or columns of the SO(3)-matrix $\Lambda_I^i$ are orthogonal with respect to the
SU(2)-Killing metric $\eta_{ij}= -2{\rm tr}(\tau_i\tau_j)=\delta_{ij}$.) Although the $h_{e_I}$ do not
commute with one another, any pair of them obeys $gh=hg+h^{-1}g+hg^{-1}- {\rm tr}(hg)$~\cite{HomCosmo}: A
product of diagonal holonomies with $h$ appearing to the right of $g$ can be reordered so that $h$ appears
on the left in all terms.
The gauge structure of diagonal models is essentially Abelian.
To make Abelianization manifest, one usually works with matrix elements $h_I= \exp(i\ell_I
\tilde{c}_I/2)\!\in\! {\rm U}(1)$, completing the reduction of the theory to an Abelian one.
For diagonal models, all phase-space information is indeed captured by these matrix elements because
$\exp(\ell_I\tilde{c}_{(I)} \Lambda_I^i\tau_i)= \cos(\ell_I\tilde{c}_I/2)+2\Lambda^i_I\tau_i
\sin(\ell_I\tilde{c}_{(I)}/2)$.
Fluxes computed for surfaces normal to $X_K^a$ are $F^K=\ell_I\ell_J\tilde{p}^K$ ($\epsilon_{IJK}=1$).
Holonomies as multiplication operators on states in the connection representation and f\/luxes as
derivatives simplify signif\/icantly by Abelianization.
For instance, the volume ope\-ra\-tor, notorious for its complicated spectrum in the full
theory~\cite{VolSpecI,VolSpecII} and also on the 6-valent vertices of non-diagonal homogeneous
models~\cite{cosmoII}, is a~simple product $\hat{V}=\sqrt{|\hat{F}^1\hat{F}^2\hat{F}^3|}$ of derivative
operators $\hat{F}^K=-8\pi i\gamma\ell_{\mathrm P}^2 \lambda_I\lambda_J\partial/\partial c_K$ ($\epsilon_{IJK}=1$) on
U(1).
Abelianization also implies that a~triad representation becomes available.
\subsubsection{Abelian homogeneous connections}
{\sloppy In diagonalized or isotropic models we encounter Abelian homogeneous connections with \mbox{$G={\rm U}(1)$}.
In this case, the structures introduced for general homogeneous connections simplify: Our function space
consists of superpositions of functions $\rho_{\lambda,n}(g)$ of a~single variable
$g(L,\tilde{c})=\exp(iL\tilde{c})$ per independent direction, with $\lambda\in{\mathbb Q}$ and
$n\in{\mathbb N}$, using U(1)-representations \mbox{$\rho_n(g)=g^n$}.
Multiplication~\eqref{Mult} now reads
\begin{gather}
\label{MultAbel}
\rho_{\lambda_1,n_1}(g)\cdot\rho_{\lambda_2,n_2}(g)=\rho_{z,n_1}(g)^{N_1}\rho_{z,n_2}(g)^{N_2}
=\rho_{z,N_1n_1+N_2n_2}(g),
\end{gather}
again with integers $N_1$, $N_2$ and maximal $z$ so that $\lambda_1=N_1z$ and $\lambda_2=N_2z$.
The star relation is $\rho_{\lambda,n}(g)^*= \rho_{\lambda,-n}(g)$, and the inner product~\eqref{InnProd}
evaluates to
\begin{gather}
\nonumber
(\rho_{\lambda_1,n_1}(g),\rho_{\lambda_2,n_2}(g))=\frac{1}{2\pi}\int_0^{2\pi/z}{\rm d}
(zc)\rho_{\lambda_1,-n_1}(\exp(ic))\cdot\rho_{\lambda_2,n_2}(\exp(ic))
\\
\qquad=\frac{1}{2\pi}\int_0^{2\pi}{\rm d}x\rho_{-N_1n_1+N_2n_2}(\exp(ix))=\delta_{N_1n_1,N_2n_2}=
\begin{cases}
0,&\lambda_1n_1\not=\lambda_2n_2,
\\
1,&\lambda_1n_1=\lambda_2n_2
\end{cases}
\label{InnProdAbel}
\end{gather}
after substitution.
Finally, the derivative operator~\eqref{Deriv} is
\begin{gather*}
\hat{p}\rho_{\lambda,n}(g)=8\pi\gamma\ell_{\rm P}^2\lambda n\rho_{\lambda,n}(g)
\end{gather*}
with eigenvalues $8\pi\gamma \ell_{\rm P}^2 \lambda n$.
(This Abelian derivative operator is self-adjoint.)
}
These equations bear some resemblance to representations on function spaces on the Bohr compactif\/ication
of the real line, but they are not identical.
\subsubsection{Relation to the Bohr compactif\/ication of the real line}
As recalled in Section~\ref{s:arte}, traditional loop-based minisuperspace quantizations, for
instance in isotropic models, combine the length parameter $\ell_0$ of edges with discrete representation
labels as one real number, giving exponentials $h(\tilde{c})^{n}=\exp(in\ell_0\tilde{c})=\exp(in\lambda c)$
with $c=V_0^{1/3}\tilde{c}$, $\lambda=\ell_0/V_0^{1/3}$, and $\mu=n\lambda\in{\mathbb R}$.
In this Abelian case, homogeneous connections are often viewed as elements of the Bohr compactif\/ication
$\overline{{\mathbb R}}_{\rm Bohr}$ of the real line~\cite{Bohr} (rather than U(1), which is obtained for
f\/ixed $\ell_0$~\cite{IsoCosmo}).
The Bohr compactif\/ication of the real line is a~compact Abelian group with representations in one-to-one
correspondence with those of ${\mathbb R}$: they are given by $z\mapsto z^{\mu}$ for all real
$\mu$.\footnote{Starting from U(1) instead of ${\mathbb R}$, we make the family of representations
continuous by enlarging the group manifold while keeping it compact.
This procedure has no analog for the non-Abelian SU(2), in which non-trivial Lie brackets determine the
representations and discrete spectra of its generators, as well-known from angular momentum in quantum
mechanics.} Functions on $\overline{{\mathbb R}}_{\rm Bohr}$ form a~Hilbert space using the Haar measure
\begin{gather}
\label{BohrInt}
\int{\rm d}\mu_{\rm H}(c)=\lim_{C\to\infty}\frac{1}{2C}\int_{-C}^C{\rm d}c.
\end{gather}
As per the Peter--Weyl theorem, all continuous functions on $\overline{{\mathbb R}}_{\rm Bohr}$ can be
written as countable superpositions
\begin{gather*}
\psi(c)=\sum_{\mu}\psi_{\mu}\exp(i\mu c),
\end{gather*}
states $\exp(i\mu c)$ forming an orthonormal basis for all real $\mu$.
These are eigenstates of the derivative operator $\hat{p}=-8\pi i\gamma\ell_{\rm P}^2{\rm d}/{\rm d}c$ with
eigenvalues $8\pi \gamma\ell_{\rm P}^2\mu$.
(See~\cite{Fewster2008, Velhinho,Velhinho2} for more details on the Bohr compactif\/ication of the real line.)
The form of states suggests a~map between the spaces of functions on Abelian homogeneous connections and
functions on the Bohr compactif\/ication of the real line: $B\colon \rho_{\lambda,n}(g)\mapsto
\exp(i\lambda n c)$ onto the subspace spanned by all $\exp(i\mu c)$ with rational $\mu$.
With the formulas for inner pro\-ducts,~\eqref{InnProdAbel} and~\eqref{BohrInt}, it follows that this map is
an isometry, and it is a~$*$-algebra morphism and commutes with the action of $\hat{p}$.
However, it is not invertible, and therefore not unitary: one can easily f\/ind
$(\lambda_1,n_1)\not=(\lambda_2,n_2)$ such that $\lambda_1n_1=\lambda_2n_2$.\footnote{There is a~bijection
between suitable subspaces of the Bohr--Hilbert space and the Abelian homogeneous Hilbert space with
structure group $G={\rm U}(1)$.
If we take the subspace restricted by $0\leq\lambda<1$, the map $\rho_{\lambda,n}(g)\mapsto
|\mu\rangle:=|\lambda+n\rangle$ is a~one-to-one transformation to the subspace of the Bohr--Hilbert space
with rational $\mu$.
With the restriction on $\lambda$, one can, given $\mu$, uniquely determine $n$ as the integer part of
$\mu$ and $\lambda$ as $\mu-n$.
Moreover, if $\lambda_1+n_1\not=\lambda_2+n_2$, then $\lambda_1\not=\lambda_2$ or $n_1\not=n_2$.
However, choices $(\lambda_1,n_1,\lambda_2,n_2)$ exist for which $\lambda_1+n_1\not=\lambda_2+n_2$ but
$\lambda_1n_1=\lambda_2n_2$; the inner product is therefore not preserved and the map is not unitary.}
Not all features of the Bohr compactif\/ication are realized in homogeneous models even of Abelian type;
care is therefore required if only the Bohr compactif\/ication is used:
\begin{enumerate}\itemsep=0pt
\item The label $\mu$ is a~degenerate version of a~pair $(\lambda,n)$ of state parameters, playing distinct
roles in holonomies and discrete dynamics.
The degeneracy of $\lambda$ and $n$ in $\mu$ is lifted by a~direct quantization of homogeneous connections
as linear maps $\phi\colon {\cal L}S\to {\cal L}G$.
\item Our new quantization of homogeneous connections easily applies to non-Abelian models, while the Bohr
compactif\/ication of ${\mathbb R}^3$ does not properly display non-Abelian features of general anisotropic
models.
Via the spectrum of our $C^*$-algebra, we obtain a~compacti\-f\/i\-ca\-tion of the space of non-Abelian
connections unrelated to the Bohr compactif\/ication.
\end{enumerate}
The Bohr compactif\/ication was introduced to loop quantum cosmology in~\cite{Bohr} by way of a~pure
minisuperspace quantization of the isotropic connection component $c$.
Compared to using a~periodif\/ication of the real line to U(1), as originally done in~\cite{IsoCosmo}, this
procedure has the advantage of faithfully representing all values of the connection component: $c$ can be
computed if exponentials $\exp(i\mu c)$ are known for all real $\mu$ (irreducible representations of
$\overline{\mathbb R}_{\rm Bohr}$), while knowing $\exp(inc)$ with integer $n$ (irreducible representations
of U(1)) allows one to compute $c$ only up to adding integer multiples of $2\pi$.
Still, this alteration of the original quantization is inadequate, as shown here.
An isotropic connection is not a~number $c$, and a~diagonal homogeneous connection is not a~triple of
numbers $c_I$, just as an inhomogeneous connection is not a~collection of scalar f\/ields.
A homogeneous connection is a~linear map from ${\cal L}S$ to ${\cal L}G$, or an element of ${\cal
L}S^*\times {\cal L}G$.
The factor of ${\cal L}S^*$ is crucial to relate the nature of a~connection as a~1-form, but it is
overlooked if one takes only the components $c_I$, or a~single $c$ for isotropic models.
The new quantization of homogeneous models provided here takes into account the correct mathematical
structure of homogeneous connections, leading to inequivalent Hilbert space representations.
In some of the following sections, we will see that these dif\/ferences are crucial for realizing
a~relation to the full theory and for some dynamical aspects.
\subsection{Minisuperspace operators and averaging}
\label{s:Aver}
Minisuperspace quantizations allow a~large set of choices regarding quantum representations, kinematical
operators, and, most of all, the dynamics.
The dynamics is the most dif\/f\/icult to derive from the full theory, requiring detailed projection maps
to ensure that one stays on the space of homogeneous states; no strict derivation is available as of now.
Fortunately, however, quantum geometry implies several general ef\/fects in the dynamics, for instance in
Hamiltonian constraint operators of loop quantum cosmology, deviating from classical expressions by the use
of holonomies and inverse-triad operators.
The form of holonomies and inverse triads, in turn, is dictated by properties of the kinematical quantum
representation used.
If one can derive the simpler setting of kinematical representations and basic operators, properties that
imply characteristic dynamics in the full theory are realized in reduced models as well.
Reliable qualitative ef\/fects can be predicted even if the dynamics is not directly derived but rather
constructed by analogy with the full theory, using reduced operators.
Given that the full dynamics so far appears to be ambiguous, too, only generic ef\/fects are reliable,
anyway.
Details of the reduction of dynamics may not matter much, provided one is asking the right questions.
Relating models to the full theory helps one decide which questions can (and should) be asked.
\subsubsection{Lattice subalgebras and spin-lattice states}
For any f\/ixed triple of integers ${\cal N}_I$, the operators $\rho_{k_I/{\cal
N}_I,j_I}(g_I)^{m_I}_{n_I}$, for all integer $k_I$, together with $\hat{p}^J_i$ form a~subalgebra of the
homogeneous holonomy-f\/lux algebra, which we call a~lattice subalgebra or, more specif\/ically, the
$(1/{\cal N}_1,1/{\cal N}_2,1/{\cal N}_3)$-lattice subalgebra.
Any state $\rho_{k_I/{\cal N}_I,j_I}(g_I)^{m_I}_{n_I}|0\rangle$, obtained by acting with a~homogeneous
lattice-subalgebra holonomy on the cyclic state $|0\rangle$ independent of $g_I$, can be written as
a~superposition
\begin{gather*}
\rho_{k_I/{\cal N}_I,j_I}(g_I)^{m_I}_{n_I}=\sum_{h_1,\ldots,h_{k_I-1}}\rho_{1/{\cal N}_I,j_I}(g_I)^{m_I}
_{h_1}\rho_{1/{\cal N}_I,j_I}(g_I)^{h_1}_{h_2}\cdots\rho_{1/{\cal N}_I,j_I}(g_I)^{h_{k_I-1}}_{n_I}
\end{gather*}
of products of elementary excitations $\rho_{1/{\cal N}_I,j_I}(g_I)^{m_I}_{n_I}$.
It can be viewed as the evaluation of a~lattice-based spin-network state in a~homogeneous connection~--
a~cylindrical state whose graph is a~lattice with straight edges and regular spacings $\ell_I=L_I/{\cal
N}_I$.
In order to make contact with inhomogeneous states, we use a~spatial lattice of the form just introduced,
with uniform spacing $\ell_I$ in direction $X_I^a$ as measured in coordinates, from links along the three
invariant vector f\/ields $X_I^a$ of a~Bianchi I model.
We require the region ${\cal V}$ of coordinate size $V_0=L_1L_2L_3$ to be suf\/f\/iciently large, to allow
many lattice links of the chosen spacings.
We must restrict attention to Bianchi I at this stage to obtain closed lattices.
For non-Abelian symmetry groups, such as those of Bianchi models other than type~I, dif\/ferent generators
do not form closed square loops by their integral curves, and therefore no lattice can be
constructed\footnote{For two generators $X_1$ and $X_2$, a~single closed loop is obtained if one uses
integral curves of the left-invariant vector f\/ield of $X_1$ and the right-invariant vector f\/ield of
$X_2$, as proposed in~\cite{APSCurved}: left-invariant vector f\/ields commute with right-invariant ones.
However, no complete lattice can be formed from these integral curves in three spatial dimensions: To
generate lattice sites, one would have to f\/ix one type of vector f\/ield, left- or right-invariant, for
each spatial direction.
If $X_1^a$ is taken as left-invariant, $X_2^a$ must be right-invariant for a~closed 2-dimensional lattice
in the $1-2$-surface.
For a~closed lattice in the $1-3$-surface, also $X_3^a$ would have to be right-invariant, but then, with
both $X_2^a$ and $X_3^a$ right-invariant, there is no closed lattice in the $2-3$-surface~-- unless
$X_2^a$ and $X_3^a$ happen to commute.
Lattice constructions based on the interplay of left- and right-invariant vector f\/ields cannot be
performed for all Bianchi types, making those constructions in the available cases (Bianchi I and II)
non-generic.
Attempts at such constructions in anisotropic models show some of the pitfalls of ad-hoc assumptions, as
illustrated by the series~\cite{ImpBianchiI,ImpBianchiII,ImpBianchiIX} of papers where most initial claims
of~\cite{ImpBianchiI}, for instance regarding averaging or a~possible relation to lattice constructions,
had to be withdrawn or weakened in later installments.
Initially simple-looking constructions became more and more contrived.
Instead, it is more general (while still not free of assumptions) to use lattices according to the
kinematical structure of Bianchi~I, and then implement other Bianchi models by suitable curvature terms in
the dynamics~\cite{Spin}.
In this way, all Bianchi class A models can be quantized with one and the same scheme.
One may worry about an inconsistency in using Bianchi~I lattices for other Bianchi models.
However, at the inhomogeneous lattice level, no strict Bianchi models can be realized.
The symmetry type just provides guidelines along the way to consistent dynamics, which can well be realized
for all class~A Bianchi models.}. As seen in Section~\ref{s:Ham}, such Bianchi models can still be
quantized at least as far as their dynamics is concerned: One would refer to Bianchi~I lattices to def\/ine
basic homogeneous variables, and implement the dif\/ferent dynamics by an additional potential in the
Hamiltonian constraint.
Lacking closed lattices, the spatial manifold structure of other Bianchi models cannot be realized in
a~quantum model.
However, this classical ingredient should not be taken too seriously, anyway, because from inhomogeneous
models the ef\/fects of loop quantum gravity are known to modify the classical space-time structure as
a~consequence of a~quantum-corrected hypersurface-deformation algebra~\cite{Action,BohrWigner}.
Lattices can be seen as a~crutch or a~helpful visualization to construct suitable state spaces on which one
can represent operators with the correct classical limit.
Once such state spaces have been obtained, they can be extended to dif\/ferent kinds of dynamics even if
lattices are no longer available.
{\sloppy Fixing an orientation for each of the three directions, we label lattice links by pairs $(v,I)$ of a~vertex
$v$ as the starting point of a~link $e_{v,I}$ in direction $X_I^a$ (as in~\cite{QuantCorrPert}).
For a~connection $A_a^i$ (not assumed homogeneous at this stage), each link gives rise to a~holonomy
$h_{v,I}= {\cal P}\exp(\int_{e_{v,I}} A_a^i\tau_i\dot{e}^a{\rm d} s)$.
We will work with spin-network states of the underlying lattice (not required to be gauge invariant), or
spin-lattice states.
Each link holonomy appears in some irreducible SU(2)-representation with spin $j_{v,I}$.
In the matrix representation $\rho_{j_{v,I}}(h_{v,I})$, we~pick matrix elements $\langle
m_{v,I}|\rho_{j_{v,I}}(h_{v,I})|n_{v,I}\rangle$, with two eigenstates $|m_{v,I}\rangle$ and
$|n_{v,I}\rangle$ of $\rho_{j_{v,I}}(\tau_3)$ (or~any other component).
The function $\langle m_{v,I}|\rho_{j_{v,I}}(h_{v,I})|n_{v,I}\rangle$ is then an eigenstate with
eigenva\-lues~$m_{v,I}$ and~$n_{v,I}$, respectively, of the 3-components of right-invariant and
left-invariant derivatives by~$h_{v,I}$.
Our spin-lattice states are therefore functions $\psi_{(j_{v,I},m_{v,I},n_{v,I})}(h){=}\prod_{v,I}
\langle m_{v,I}|\rho_{j_{v,I}}(h_{v,I})|n_{v,I}\rangle$ depending on the connection via link holonomies,
with an inner product def\/ined as usual by integrating over all $h_{v,I}$ using the Haar
measure~\cite{FuncInt}.
This def\/ines the Hilbert space ${\cal H}_{\rm lattice}$.
For short, we will write these states as $|(j_{v,I},m_{v,I},n_{v,I})\rangle$.
We have the usual action of holonomies and f\/luxes.
}
\subsubsection{Homogeneous distributions}
\label{s:dist}
A homogeneous analog of spin-lattice states, depending on holonomies $h_{\phi}(\lambda_IT_I)=
\exp(\lambda_I \phi(T_I))$, is $\psi_{(\lambda_I, j_I,m_I,n_I)}(g_J)= \prod_I \langle
m_I|\rho_j(h_{\phi}(\lambda_IT_I))|n_I\rangle= \prod_I \langle
m_I|\rho_{\lambda_I,j_I}(g_I)|n_I\rangle$ with $\lambda_I{\cal N}_I$ integer, written for short as
$|(\lambda_I,j_I,m_I,n_I)\rangle$.
There is an additional label $\lambda_I$, replacing the edge or link dependence of inhomogeneous states and
representing the ${\cal L}S$-part $X=\lambda_IT_I$ of a~homogeneous connection $\phi\in {\cal
L}S^*\otimes{\cal L}{\rm SU}(2)$, subject to the scaling condition.
The set of these states is f\/ixed by holonomies in the $(1/{\cal N}_1,1/{\cal N}_2,1/{\cal N}_3)$-lattice
subalgebra, with elementary holonomies acting by multiplication, changing the SU(2)-representations $j_I$
according to recoupling rules, and f\/lux operators having eigenvalues $8\pi\gamma\ell_{\rm P}^2m_I$ (for
right-invariant vector f\/ields) and $8\pi\gamma\ell_{\rm P}^2n_I$ (for left-invariant ones).
No decomposition as in~\eqref{Mult} is required since we have a~f\/ixed common denominator ${\cal N}_I$ for
all holonomies considered in direction $X_I$.
So far, homogeneous and inhomogeneous lattice states are def\/ined separately from each other.
We relate them by introducing a~map $\sigma\colon {\cal H}_{\rm hom}\to {\cal D}_{\rm lattice},
|(\lambda_I,j_I,m_I,n_I)\rangle\mapsto ((\lambda_I,j_I,m_I,n_I)|$ from the homogeneous Hilbert space to
distributions on the lattice Hilbert space.
This map is the key ingredient of quantum symmetry reduction, as described in
Section~\ref{s:Reduction}.
Following~\cite{SymmRed}, we def\/ine homogeneous distributions by their evaluations
\begin{gather}
((\lambda_I,j_I,m_I,n_I)|(j_{v,I},m_{v,I},n_{v,I})\rangle
\nonumber
\\
\qquad
=\langle(\lambda_I,j_I,m_I,n_I)|(j_{v,I},m_{v,I}
,n_{v,I})\rangle|_{h_{v,I}=\exp(\phi(T_I)/{\cal N}_I)}
\label{Dist}
\end{gather}
on all basis states of ${\cal H}_{\rm lattice}$.
On the right-hand side, the inner product is taken in ${\cal H}_{\rm hom}$, with
$|(j_{v,I},m_{v,I},n_{v,I})\rangle|_{h_{v,I}=\exp(\phi(T_I)/{\cal N}_I)}$ obtained by restricting the
connection dependence of the spin-lattice state to homogeneous $\phi$.
The distributional evaluation vanishes unless the representation $j_I$ appears in the tensor product
$\bigotimes_{v,I}j_{v,I}$, and $m_I=\sum_{v,I}m_{v,I}$,
$n_I=\sum_{v,I}n_{v,I}$.\footnote{In~\eqref{Dist}, we restrict to holonomies
$h_{v,I}=\exp(\lambda_I\phi(T_I))$ with $v$-independent $\lambda_I=1/{\cal N}_I$, or a~regular aligned
lattice of uniform link lengths.
At this stage, we could allow irregular lattices with varying $\ell_I(v)$, as long as all links are still
along symmetry generators $\sum_I\lambda_IT_I$.
Dif\/ferent lattice sectors would then contribute to the reduction, and ref\/inement would be necessary in
the multiplication and action of holonomies.
This option will be discussed in more detail below.} The reduction of states depends on the size of the
region ${\cal V}$ via ${\cal N}_I$, just like the classical reduction of the phase space.
\subsubsection{Averaged operators}
\label{s:AvOp}
An operator $\hat{O}$ can be reduced from the lattice theory to the homogeneous Hilbert space if its dual
action f\/ixes the space of homogeneous distributions: If there is a~$|\psi\rangle\in{\cal H}_{\rm hom}$
such that $((\lambda_I,j_I,m_I,n_I)|\hat{O}^{\dagger}|(j_{v,I},m_{v,I},n_{v,I})\rangle=
(\psi|(j_{v,I},m_{v,I},n_{v,I})\rangle$ for all $|(j_{v,I},m_{v,I},n_{v,I})\rangle$, we def\/ine
$\hat{O}|(\lambda_I,j_I,m_I,n_I)\rangle=|\psi\rangle$.
All link holonomies $\langle m_{v,J}|\rho_{j_{v,J}}(h_{v,J})|n_{v,J}\rangle$ along symmetry generators,
taken as multiplication operators, satisfy this condition.
They act on distributional homogeneous states by $\langle m_{v,J}|\rho_{j_{v,J}}(h_{v,J})|n_{v,J}\rangle
\langle g_I|(\lambda_I,j_I,m_I,n_I)\rangle= \rho_{1/{\cal N}_J,j_{v,J}}(g_J)^{m_{v,J}}_{n_{v,J}}\cdot
\rho_{\lambda_I,j_I}(g_I)^{m_I}_{n_I}$, just as in the reduced space of homogeneous states.
Flux operators require additional constructions.
A single lattice f\/lux $\hat{F}_{v,I}$ associated with a~surface dual to link $e_{v,I}$ does not map
a~homogeneous distribution to another such state: Take a~set of states
$|\psi_{v,I}\rangle:=|0,\ldots,0,(1/2,1/2,1/2),0,\ldots,0\rangle$, each with non-zero labels only on one
lattice link $e_{v,I}$.
We have
\begin{gather*}
((1/{\cal N}_I,j_I,m_I,n_I)|\hat{F}_{v,I}|\psi_{v,I}\big\rangle=4\pi\gamma\ell_{\rm P}^2\delta_{j_I,1/2}
\delta_{m_I,1/2}\delta_{n_I,1/2}
\end{gather*}
and $((\lambda_I,j_I,m_I,n_I)|\hat{F}_{v,I}|\psi_{v',I'}\rangle=0$ if $v\not=v'$ or $I\not=I'$.
Therefore,
\begin{gather*}
((1/{\cal N}_I,1/2,1/2,1/2)|\hat{F}_{v,I}|\psi_{v,I}\big\rangle\not
=((1/{\cal N}_I,1/2,1/2,1/2)|\hat{F}_{v,I}|\psi_{v',I}\big\rangle
\qquad
\text{for}
\qquad
v\not=v'.
\end{gather*}
However, we must have $(\Psi|\psi_{v,I}\rangle= (\Psi|\psi_{v',I}\rangle$ for any homogeneous state
$|\Psi\rangle\in{\cal H}_{\rm hom}$ since $\psi_{v,I}|_{h_{w,I}=\exp(\lambda_I\phi(T_I))}=
\psi_{v',I}|_{h_{w,I}=\exp(\lambda_I\phi(T_I))}$.
The state $((1/{\cal N}_I,1/2,1/2,1/2)|$ cannot be contained in a~decomposition of $((1/{\cal
N}_I,j_I,m_I,n_I)|\hat{F}_{v,I}$ in our basis, and we can repeat the arguments with arbitrary values of the
non-zero label in $|\psi_{v,I}\rangle$ to show that no homogeneous state can be contained in the
decomposition.
Therefore, the distribution $((1/{\cal N}_I,j_I,m_I,n_I)|\hat{F}_{v,I}$ cannot be a~superposition of
homogeneous distributional states: f\/lux operators associated with a~single link do not map the space of
homogeneous states to itself.
Even classically, the f\/low $\{\cdot,F_S\}$ generated by a~f\/lux operator is not everywhere tangent to
the submanifold of homogeneous connections and {\em unrestricted} triads in the inhomogeneous phase space,
but it is tangent to the subspace on which both the connection {\em and} the triad are homogeneous.
In the quantized theory, using distributional states, we have ensured states to be restricted to
homogeneous connections, but no such condition has yet been implemented for the densitized triad or
f\/luxes.
Flux operators must be averaged to generate a~f\/low that keeps the space of homogeneous states invariant.
However, non-Abelian gauge properties prevent us from simply adding\linebreak
$\sum_n \int_{S_n} E^a_in_a{\rm
d}^2y$ for a~family of surfaces $S_n$ translated along the generators of the symmetry group.
Instead, we must relate the f\/ibers of the SU(2)-bundle in which $E^a_i$ takes values, using parallel
transport between the $S_n$.
(This problem seems to be related to issues encountered in constructions of a~non-Abelian triad
representation~\cite{FluxRep}.
Here, homogeneity will help us to propose a~solution.)
To describe the specif\/ic construction, we assume an aligned state, consisting only of ho\-lo\-no\-mies
$h_{v,I}$ in the three independent directions but not necessarily forming a~regular lattice.
For an averaged $p^I_i$, we choose families of surfaces $S_{n,I}$ transversal to the symmetry generators
$X^a_I$, such that they have co-normals $n^I_a=\delta^I_a$, layered at regular intervals across the region
${\cal V}$.
Eventually, we will send the number $N$ of surfaces to inf\/inity.
Before doing so, we def\/ine a~gauge-covariant averaging by $\overline{p}^I_i=N^{-1}\sum\limits_{n=1}^N
\int_{S_{n,I}} {\rm ad}_{h_I(y)}(E^a_i(y)n_a^I){\rm d}^2y$ where $h_I(y)$ is the connection-dependent
parallel transport from some base point, chosen for each integral curve in direction~$I$, to a~point~$y$ on
the surface.
The base points will be chosen in a~state-dependent way because the state determines how the connection is
excited, usually in a~discontinuous way at lattice vertices.
We decompose a~state as a~superposition of contributions $\Psi=\psi(h_{w,J},h_{w',K}) \prod_{v_I}
\rho_{j_{v,I}}(h_{v_I,I})$ where the dependence on holonomies along directions $J$ and $K$ will not matter.
The set of all $v_I$ then gives us all vertices where parallel transport in the $I$-direction changes
discontinuously.
We will average with these vertices chosen as base-points, so that only the continuous parts of parallel
transport are taken into account.
We f\/irst decompose surfaces $S_{n,I}=\bigcup_k S_{n,I,k}$ so that each piece $S_{n,I,k}$
intersects at most one edge.
In the action of the f\/lux operator, instead of summing over $k$ we will then be summing over edges
intersecting the surface: We write{\samepage
\begin{gather*}
\frac{1}{N}\sum_{n=1}^N\int_{S_{n,I}}{\rm d}^2y{\rm ad}_{h_I(y)}\big(\hat{E}^a_i(y)n_a^I\big)\Psi
=\frac{1}{N}\sum_{n=1}^N\sum_{e_{v_I,I}\cap S_{n,I}
\not=\varnothing}{\rm ad}_{h_{v_I,I}(v_n)}\big(\hat{F}_{v_I,I}(S_{n,I,k})\big)\Psi
\end{gather*}
with $h_{v_I,I}(v_n)$ the parallel transport from $v_I$ to the intersection point $v_n$ of $e_{v_I,I}$ with
$S_{n,I}$.}
If a~piece of the surface $S_{n,I}$ intersects an edge $e_{v_I,I}$, we choose the base point to be $v_I$
(for a~right-invariant vector f\/ield quantizing the f\/lux, or the other endpoint of $e_{v_I,I}$ for
a~left-invariant one).
The adjoint action of $h_{v_I,I}(v_n)$ in the averaged f\/lux then implies that a~f\/lux operator does not
insert just $\tau_i$ in the holonomy, at the intersection point $\{v_n\}=S_{n,I}\cap e_{v_I,I}$ with the
surface, but $h_{v_I,I}(v_n)^{-1}\tau_i h_{v_I,I}(v_n)$.
For a~single edge $e$, splitting the holonomy $h_e:=h^{(1)}_e(v_n)h^{(2)}_e(v_n)$ in two pieces
$h^{(1)}_e(v_n)$ and $h^{(2)}_e(v_n)$ at an intersection point $v_n$, we thus have
\begin{gather*}
\hat{\overline{p}}{}^I_i\langle m'|\rho_j(h_e)|n'\rangle
=-8\pi i\gamma\ell_{\rm P}^2\lim_{N\to\infty}\frac{1}{N}\sum_{n=1}^N
\big\langle m'|\rho_j\big(h^{(1)}_e(v_n){\rm ad}_{h_e^{(1)}(v_n)}(\tau_i)h_e^{(2)}(v_n)\big)|n'\big\rangle
\\
\phantom{\hat{\overline{p}}{}^I_i\langle m'|\rho_j(h_e)|n'\rangle}
=-8\pi i\gamma\ell_{\rm P}^2\lim_{N\to\infty}\frac{1}{N}\sum_{n=1}^N\langle m'|\rho_j(\tau_i h_{e}
)|n'\rangle\delta_{S_{n,I}\cap e\not=\varnothing},
\end{gather*}
where only terms such that $S_{n,I}\cap e\not=\varnothing$ contribute.
For large $N$, the number of non-zero contributions divided by $N$ approaches $\lambda_e$, the ratio of the
length spanned by $h_e$ relative to $L_I$.
We obtain eigenvalues $8\pi\gamma\ell_{\rm P}^2 \lambda_e m'$.
Not surprisingly, in this homogeneous context the averaged f\/lux does not refer to any point on the edge
where it acts, but it picks up the relative length of the edge by the number of intersection points.
For multiple edges, the f\/lux acts by the product rule.
If all edges involved form a~regular lattice, with the number ${\cal N}_I=L_I/\ell_I$ of lattice links, it
follows that
\begin{gather}
\label{DerivAv}
\hat{\overline{p}}{}^I_i=\frac{1}{{\cal N}_I}\sum_v\hat{F}_{v,I,i}.
\end{gather}
The factor of $1/{\cal N}_I=\ell_I/L_I$ eliminates over-counting by adding f\/luxes of all lattice sites
along direction $I$.
In the other two directions, on the other hand, we sum rather than average because the minisuperspace
$p_i^I$ is def\/ined for a~surface stretching through the region ${\cal V}$, as in~\eqref{Deriv}.
Indeed, heuristically, the eigenvalues of $\hat{\overline{p}}{}^I_i$, $8\pi\gamma\ell_{\mathrm P}^2 {\cal N}_I^{-1}
\sum_v m_{v,I}$ can be written as $L_JL_K$ multiplying the average value of the densitized triad:
$8\pi\gamma\ell_{\mathrm P}^2{\cal N}_I^{-1} \sum_v m_{v,I}= 8\pi\gamma\ell_{\mathrm P}^2{\cal N}_J{\cal N}_K\overline{m_I}=
{\cal N}_J{\cal N}_K \overline{E^I_3}$, where $\overline{m_I}= ({\cal N}_1{\cal N}_2{\cal
N}_3)^{-1}\sum_v m_{v,I}$ is the lattice average, quantizing the average of the plaquette f\/lux
$\int E^I_3 {\rm d}x^J{\rm d}x^K/8\pi\gamma\ell_{\mathrm P}^2$.
\subsubsection{Kinematical quantization commutes with symmetry reduction}
\label{s:comm}
Holonomy operators in ${\cal H}_{\rm hom}$ are directly obtained from their dual action on ${\cal D}_{\rm lattice}$.
It does not matter whether we act with a~holonomy operator f\/irst and then symmetry-reduce, or f\/irst
reduce and then act with the corresponding homogeneous operator:
\begin{gather*}
\sigma(\rho_{1/{\cal N}_J,j_{v,J}}(g_J)|(\lambda_I,j_I,m_I,n_I)\rangle)=\rho_{j_{v,J}}(h_{v,J}
)\sigma(|(\lambda_I,j_I,m_I,n_I)\rangle).
\end{gather*}
After averaging, the same commutation relationship is realized for lattice f\/lux operators.
We have
\begin{gather*}
((1/{\cal N}_I,j_I,m_I,n_I)|\hat{\overline{p}}{}^J_3|(j_{v,I},m_{v,I},n_{v,I})\big\rangle\\
\qquad{}
=\frac{1}{{\cal N}_J}\sum_v ((1/{\cal N}_I,j_I,m_I,n_I)|\hat{F}_{v,J,3}|(j_{v,I},m_{v,I},n_{v,I})\big\rangle
\\
\qquad {} =\frac{8\pi\gamma\ell_{\mathrm P}^2}{{\cal N}_J}\sum_v m_{v,J}\delta_{j_I\in\bigotimes_v j_{v,I}}
\delta_{m_I,\sum_v m_{v,I}}\delta_{n_I,\sum_vn_{v,I}}
\\
\qquad{} =8\pi\gamma\ell_{\mathrm P}^2\lambda_Jm_J((\lambda_I,j_I,m_I,n_I)|(j_{v,I},m_{v,I},n_{v,I})\rangle
\end{gather*}
using the $\delta$-identif\/ications and $\lambda_J=1/{\cal N}_J$ in the last step.
On reduced states, on the other hand, we have $\hat{p}_3^J|(\lambda_I,j_I,m_I,n_I)\rangle= 8\pi\gamma \ell_{\mathrm P}^2
\lambda_Jm_J|(\lambda_I,j_I,m_I,n_I)\rangle$ from the right-invariant vector f\/ield~\eqref{Deriv} in the $\lambda_J$-sector.
Comparing these equations, we see that $\sigma(\hat{p}^J_3 |(\lambda_I,j_I,m_I,n_I)\rangle)=
\hat{\overline{p}}{}^J_3 \sigma(|(\lambda_I,j_I,m_I,n_I)\rangle)$, with analogous calculations for other
components of $\hat{p}_i^J$.
(As remarked in Section~\ref{s:Flux}, the non-Abelian $\hat{p}_i^J$ is not a~symmetric operator
unless we are in the lattice setting of f\/ixed $\lambda_I$ for all states involved.
Similarly, $\hat{\overline{p}}{}_i^J$ is not symmetric in this situation due to averaging, in particular
with a~state-dependent choice of base points for parallel transport.)
For basic operators, it does not matter whether we quantize or reduce f\/irst.
We obtain the same representation properties as in the full theory, and the same qualitative
quantum-geometry ef\/fects.
But a~quantitative correspondence is more complicated for composite operators, especially the Hamiltonian
constraint crucial for dynamics.
\subsubsection{Holonomy-f\/lux algebra in reduced models}
The previous calculations have shown how holonomy-f\/lux representations of homogeneous mo\-dels are derived
from the full algebra.
Using the minisuperspace embedding $\sigma$, we obtain basic operators~-- holonomies and f\/luxes~-- from
their action on inhomogeneous lattice states.
Since the holonomy-f\/lux representation of the full theory is unique~\cite{WeylRep, LOST}, the
minisuperspace representation derived here, by restriction of the full algebra to lattices followed by
taking the dual action on homogeneous distributions, enjoys the same distinction for a~given lattice.
So far, we have written all formulas for the general case of non-Abelian homogeneous models.
Using the same techniques of restriction of states and reduction of operators, it is straightforward to
implement diagonalization or isotropy: We restrict states to $\phi_I^i= c_{(I)} \Lambda_I^i$ for diagonal
models, or $\phi_I^i= c\Lambda_I^i$ for isotropic ones.
Flux operators $\hat{\overline{p}}{}^I= \Lambda_{(I)}^i \hat{\overline{p}}{}^I_i$ then leave diagonal
states invariant (while $\Lambda_J^i\hat{\overline{p}}{}^I_i$ with $J\not=I$ would not), and the averaged
$\hat{\overline{p}}=\frac{1}{3} \sum_I\hat{\overline{p}}{}^I$ leaves isotropic states invariant.
These situations are covered in~\cite{InhomLattice}.
\subsection{Dynamics}
From holonomy and f\/lux operators, we construct more complicated ones such as the volume or the
Hamiltonian constraint.
The volume on spin-lattice states can be def\/ined as in the full theory, using
$|\frac{1}{6}\epsilon^{ijk}\epsilon^{IJK}\hat{F}_{v,I,i} \hat{F}_{v,J,j}\hat{F}_{v,K,k}|^{1/2}$, just
restricted to 6-valent vertices as encountered in a~lattice.
The complete spectrum is unknown in the non-Abelian case.
For simpler algebraic relations, we may replace the cubic SU(2)-invariant with a~product of quadratic
invariants, $\hat{V}_v:=\prod\limits_{I=1}^3(\hat{F}_{v,I,i} \hat{F}_{v,I}{}^i)^{1/4}$ with eigenvalues
$(8\pi\gamma)^{3/2} \ell_{\rm P}^3 \prod\limits_{I=1}^3 (j_{v,I}(j_{v,I}+1))^{1/4}$.
In what follows, details and dif\/ferences of these spectra will not play a~major role, and we will make
use of the simpler version.
As part of the formulation of dynamics, we will be interested in reducing the volume operator.
An important question for non-linear combinations of basic operators is whether the average is taken before
or after reduction.
The minisuperspace volume
\[
\prod\limits_{I=1}^3 \big(\hat{\overline{p}}{}^I_i
\hat{\overline{p}}{}^{I,i}\big)^{1/4}=({\cal N}_1{\cal N}_2{\cal N}_3)^{-1/2} \prod\limits_{I=1}^3
\sqrt[4]{\hat{J}_I^2},
\]
using $\hat{J}_I=\sum_v \hat{F}_{v,I}$ and~\eqref{DerivAv}, has eigenvalues
\begin{gather*}
\frac{(8\pi\gamma)^{3/2}\ell_{\rm P}^3}{\sqrt{{\cal N}_1{\cal N}_2{\cal N}_3}}\prod_{I=1}
^3\sqrt[4]{j_I(j_I+1)}=(8\pi\gamma)^{3/2}\ell_{\rm P}^3\prod_{I=1}^3\sqrt{\lambda_I}\sqrt[4]{j_I(j_I+1)}.
\end{gather*}
The spectrum can be computed using $\hat{p}^I_i$ in~\eqref{Deriv} or $\hat{\overline{p}}{}^I_i$
in~\eqref{DerivAv}, but it does not equal that of the averaged
$({\cal N}_1{\cal N}_2{\cal N}_3)^{-1}\sum_v \hat{V}_v$, which would be the reduced volume operator.
The distinction is important for the correct size of quantum-geometry ef\/fects, as we will see below.
Pure minisuperspace models make use of $\hat{p}^I_i$ or $\hat{\overline{p}}{}^I_i$; correctly capturing
quantum ef\/fects requires an averaged volume operator.
\subsubsection{Hamiltonian constraint}
\label{s:Ham}
{\sloppy The classical Hamiltonian constraint contains curvature components, to be represented in homogeneous models
by holonomies $\rho_{\lambda,j}(g_I)^m_n$, non-polynomial functions of the connection which dif\/fer from
connection or curvature components by higher-order terms.
In Abelian models, it is easy to see that $\rho_{\lambda,n}(g_I)$ as an operator is not weakly
continuous in $\lambda$ at $\lambda=0$, and it is not possible to def\/ine a~connection operator via ${\rm
d}/{\rm d}\lambda|_{\lambda=0}$: The diago\-nal matrix elements $\langle
(\lambda_I',n_I')|\rho_{\lambda,n}(g_I)|(\lambda_I',n_I')\rangle= \delta_{0,\lambda n}$,
using~\eqref{InnProdAbel}, are not conti\-nuous in $\lambda$ at $\lambda=0$.
In the non-Abelian case, the argument is more complicated because
$\langle(\lambda_I',j_I',m_I',n_I')|\rho_{\lambda,j}(g_I)^m_n |(\lambda_I',j_I',m_I',n_I')\rangle$ need not
be zero for $\lambda\not=0$.
The value rather depends on the multiplicity of the trivial representation in the tensor product
$\rho_{j_I'}^{\otimes 2q}\otimes \rho_j^{\otimes p}$ if $\lambda_I'=q/r$ and~$\lambda=p/r$ with their least
common denominator $r$.
For $\lambda=0$, we pick out the trivial representation in $\rho_{j_I'}^{\otimes 2q}$; for $\lambda\not=0$
we pick out the coef\/f\/icients of all irreducible representations of $\rho_{j}^{\otimes p}$ in~$\rho_{j_I'}^{\otimes 2q}$.
In general, these coef\/f\/icients are not continuously related at~$\lambda=0$.
}
Instead of using derivatives and connection operators, we are required to use holonomies
$\rho_{\delta,j}(g_I)^m_n$ with some f\/inite $\delta$ to construct the Hamiltonian constraint operator,
appealing to the standard relationship between curvature components and holonomies around small closed
loops.
Since the same relationship is used for the full constraint~\cite{RS:Ham,QSDI}, there is at least
a~plausible connection between models and the full theory.
We have
\begin{gather}
\label{HolExp}
h_{\Delta}=1+\ell^2s^a_1s^b_2F_{ab}^i\tau_i+O\big(\ell^4\big)
\end{gather}
if the loop $\Delta$, spanned by two unit vectors $s_1^a$ and $s_2^a$, is of coordinate area $\ell^2$.
In lattice constructions, one may use loops around elementary plaquettes, although consistency issues of
the constraint algebra may require more complicated routings (see e.g.\ \cite{TwoPlusOneDef}).
The coordinate area of a~loop in the $(I,J)$-plane is then close to $\ell_I\ell_J$, and we are led to use
homogeneous holonomies $\rho_{\delta_I,j_I}(g_I)$ with $\delta_I=\ell_I/L_I=1/{\cal N}_I$.
In Bianchi models, we have the Hamiltonian constraint~\eqref{HamHom}, or~\eqref{H} when diagonalized.
Putting holonomies along square loops and suitable constructions of triad operators together as in the full
theory~\cite{QSDI}, we obtain, following~\cite{cosmoIII,HomCosmo},
\begin{gather}
\hat{H}=-\frac{1}{(8\pi)^2\gamma^3G^2\hbar\delta_1\delta_2\delta_3}\sum_{I,J,K}\epsilon^{IJK}{\rm tr}
\Bigl(\rho_{\delta_I,1/2}(g_I)\rho_{\delta_J,1/2}(g_J)\rho_{\delta_I,1/2}(g_I)^{-1}\rho_{\delta_J,1/2}
(g_J)^{-1}
\nonumber
\\
\phantom{\hat{H}=}{}
\times\left|\rho_{\delta_K,1/2}(g_K)[\rho_{\delta_K,1/2}(g_K)^{-1},\hat{V}]\right|\Bigr)+\hat{H}_{\Gamma}
\label{Ham}
\end{gather}
with matrix products of all holonomies involved.
Instead of $j=1/2$ one may use other irreducible representations,
or add dif\/ferent such contributions~\cite{Gaul,AlexAmbig,AmbigConstr}.
Also the values of $\delta_I$ are subject to choices whose implications we will discuss in more detail below.
One may choose f\/ixed values, or relate them to properties of the state acted on by the constraint operator.
In the latter case, the state dependence of regularized constraints in the full theory would be modeled.
For now, however, we will assume f\/ixed $\delta_I$ so that $\hat{H}$ preserves a~lattice subalgebra.
The term $\hat{H}_{\Gamma}$ in~\eqref{Ham} vanishes for the Bianchi~I model and incorporates
spin-connection terms for other models, as in~\cite{Spin}.
The commutator $|\rho_{\delta_K,1/2}(g_K)[\rho_{\delta_K,1/2}(g_K)^{-1},\hat{V}]|$ in the se\-cond line
quantizes the classical combination $\epsilon^{ijk}E^{[a}_iE^{b]}_j/\sqrt{|\det E|}$ which diverges for
degenerate triads, at classical singularities of collapse type\footnote{The absolute value around the
commutator is necessary because the classical analog $\{A_a^i,V\}=2\pi\gamma G{\rm sgn}(\det E)
\epsilon^{ijk}E^{[a}_iE^{b]}_j/\sqrt{|\det E|}$ carries a~sign factor.
The absolute value avoids parity violation (see~\cite{FermionHolst} for a~detailed discussion of parity).
Classically, the sign of $\det E$ changes whenever the sign of $\{A_a^i,V\}$ changes, and one could
multiply the latter with ${\rm sgn}(\det E)$ to avoid parity violation.
However, when quantized, $\rho_{\delta_K,1/2}(g_K)[\rho_{\delta_K,1/2}(g_K)^{-1},\hat{V}]$ and
$\widehat{\det E}$ do not commute in non-Abelian models.
Since the dynamics identif\/ies states annihilated by the commutator as degenerate ones corresponding to
classical singularities, we refer to its own sign instead of multiplying it with the non-commuting operator
$\widehat{{\rm sgn}\det E}$~\cite{DegFull}.}. Writing this operator on the right in $\hat{H}$, in the
ordering chosen in~\eqref{Ham} as in the full theory~\cite{QSDI}, implies that singular states decouple
from the dynamics: They are automatically annihilated by the constraint.
(There is a~similar factor in~$\hat{H}_{\Gamma}$, also ordered to the right.) In this way, quantum
hyperbolicity~\cite{BSCG} is realized even if no dif\/ference equation is available as an explicit
evolution equation.
\subsubsection{Abelian models and dif\/ference equations}
Dif\/ference equations are obtained in Abelian models after transforming to the triad representation,
provided the Hamiltonian constraint f\/ixes a~lattice subalgebra.
First, writing Abelian holonomies of a~diagonal Bianchi model as $\rho_{\delta_I,1/2}(g_I)=\cos(\delta_I
c_I/2)+ 2\Lambda_I^i\tau_i\sin(\delta_I c_I/2)$, one can compute all matrix products and the trace
in~\eqref{Ham}.
Since the f\/inal result is lengthy, we def\/ine
\begin{gather*}
\hat{K}_3:=\sin(\delta_1c_1/2)\cos(\delta_1c_1/2)\sin(\delta_2c_2/2)\cos(\delta_2c_2/2)=\frac{1}{4}
\sin(\delta_1c_1)\sin(\delta_2c_2)
\end{gather*}
and cyclic permutations thereof, as well as
\begin{gather*}
\hat{I}_J=\left|2i\left(\sin(\delta_J c_J/2)\hat{V}\cos(\delta_J c_J/2)-\cos(\delta_J c_J/2)\hat{V}
\sin(\delta_J c_J/2)\right)\right|.
\end{gather*}
The Hamiltonian constraint is then
\begin{gather*}
\hat{H}=-\frac{1}{8\pi^2\gamma^3G^2\hbar\delta_1\delta_2\delta_3}\sum_{J=1}^3\hat{K}_J\hat{I}_J.
\end{gather*}
It is straightforward to compute the action of $\hat{K}_J$ and the eigenvalues of $\hat{I}_J$ on the
$(\delta_1,\delta_2,\delta_3)$-lattice subalgebra (now dropping the f\/ixed $\delta_I$ from the notation of
states):
\begin{gather*}
\hat{K}_3|n_1,n_2,n_3\rangle=-\frac{1}{16}
(|n_1+2\delta_1,n_2+2\delta_2,n_3\rangle-|n_1-2\delta_1,n_2+2\delta_2,n_3\rangle
\\
\phantom{\hat{K}_3|n_1,n_2,n_3\rangle=}{}
-|n_1+2\delta_1,n_2-2\delta_2,n_3\rangle+|n_1-2\delta_1,n_2-2\delta_2,n_3\rangle)
\end{gather*}
and cyclic permutations, and
\begin{gather*}
\hat{I}_1|n_1,n_2,n_3\rangle=|V_{n_1+\delta_1,n_2,n_3}-V_{n_1-\delta_1,n_2,n_3}||n_1,n_2,n_3\rangle
\\
\phantom{\hat{I}_1|n_1,n_2,n_3\rangle}{}
=(8\pi\gamma)^{3/2}\ell_{\rm P}^3\left|\sqrt{|n_1+\delta_1|}-\sqrt{|n_1-\delta_1|}\right|\sqrt{|n_2n_3|}
|n_1,n_2,n_3\rangle.
\end{gather*}
\looseness=-1
Since $\hat{H}|n_I\rangle$ must vanish (or equal the action of a~matter Hamiltonian $\hat{H}_{\rm matter}$
on $|n_I\rangle$), a~dif\/ference equation is obtained in the triad representation of coef\/f\/icients
$\psi_{n_1,n_2,n_3}$ in $|\psi\rangle= \sum_{n_I}\psi_{n_1,n_2,n_3} |n_I\rangle$.
Introducing $s_{n_1,n_2,n_3}:= \sqrt{|n_1n_2n_3|}\psi_{n_1,n_2,n_3}$ to shorten the
expression\footnote{Dividing by $n_1n_2n_3$ is well-def\/ined for the evolution equation because
$|n_1,n_2,n_3\rangle$ is annihilated by the constraint whenever $n_1n_2n_3=0$, as part of the property of
quantum hyperbolicity.
The coef\/f\/icients $\psi_{n_1,n_2,n_3}$ with $n_1n_2n_3=0$ decouple from the rest and can safely be
ignored.}, we have
\begin{gather}
-A_{\delta_1}(n_1)\left(s_{n_1,n_2+2\delta_2,n_3+2\delta_3}-s_{n_1,n_2-2\delta_2,n_3+2\delta_3}
-s_{n_1,n_2+2\delta_2,n_3-2\delta_3}+s_{n_1,n_2-2\delta_2,n_3-2\delta_3}\right)
\nonumber
\\
\quad{} -A_{\delta_2}(n_2)\left(s_{n_1+2\delta_1,n_2,n_3+2\delta_3}-s_{n_1-2\delta_1,n_2,n_3+2\delta_3}
-s_{n_1+2\delta_1,n_2,n_3-2\delta_3}+s_{n_1-2\delta_1,n_2,n_3-2\delta_3}\right)
\nonumber
\\
\quad{} -A_{\delta_3}(n_3)\left(s_{n_1+2\delta_1,n_2+2\delta_2,n_3}-s_{n_1-2\delta_1,n_2+2\delta_2,n_3}
-s_{n_1+2\delta_1,n_2-2\delta_2,n_3}+s_{n_1-2\delta_1,n_2-2\delta_2,n_3}\right)
\nonumber
\\
=128\pi^2\gamma^3G\ell_{\rm P}^2\delta_1\delta_2\delta_3\frac{\hat{H}_{\rm matter}(n_1,n_2,n_3)}
{V_{n_1,n_2,n_3}}s_{n_1,n_2,n_3}
\label{Evolve}
\end{gather}
with volume eigenvalues inserted.
We used partial eigenvalues of the matter Hamiltonian\footnote{For simplicity, we assume the absence of
connection couplings; see~\cite{LRSParity} for a~fermionic model in which the assumption does not hold.
No qualitative changes to the present statements occur in such a~case.}
\begin{gather*}
\hat{H}_{\rm matter}|\psi\rangle=\sum_{n_I}\left(\hat{H}_{\rm matter}(n_I)\psi_{n_1,n_2,n_3}
\right)|n_I\rangle
\end{gather*}
which may still act on a~matter-f\/ield dependence of $s_{n_1,n_2,n_3}$.
The other coef\/f\/icients refer to eigenvalues of $\hat{I}_I$, def\/ining $A_{\delta_I}(n_I):=
I_I(n_1,n_2,n_3)/V_{n_1,n_2,n_3}$ such that $A_{\delta}(n)=
|\sqrt{|n+\delta|}-\sqrt{|n-\delta|}|/\sqrt{|n|}$.
Equation~\eqref{Evolve} correctly quantizes the classical terms such as
$c_2c_3a_1=c_2c_3\sqrt{|p^2p^3/p^1|}$ in the Hamiltonian constraint~\eqref{H}, using the ordering
$|p^1|^{-1} c_2c_3 \sqrt{|p^1p^2p^3|}$.
Equations such as~\eqref{Evolve} have been derived long ago~\cite{HomCosmo}, and reproduced since then many
times, in slightly dif\/ferent forms.
We have presented the derivation here with some detail because, lacking the proper notion of homogeneous
connections, it had not been realized before that the form is valid only for a~Hamiltonian constraint
operator f\/ixing a~lattice subalgebra of the homogeneous model.
If the $\delta_I$ are not f\/ixed once and for all but depend on the state acted on (and even its graph),
or if the $\delta_I$ used in the operator are not the same as those of the lattice subalgebra, the
decomposition rule~\eqref{Mult} or its Abelian analog~\eqref{MultAbel} must be used, implying ref\/inement.
The operator~\eqref{Ham} will still be valid, but its action must be re-derived, and does not easily give
rise to a~dif\/ference equation, certainly not one of constant step-size.
We emphasize that ref\/inement is realized even if the operator~\eqref{Ham} is not modif\/ied, provided
only one applies it to states not in the lattice subalgebra f\/ixed by it.
\subsubsection{Lattice ref\/inement: a~toy model}
\label{s:RefToy}
So far, we have presented the minisuperspace quantization of Hamiltonians.
For contact with the full theory, we must try to reduce an inhomogeneous operator and face the averaging
problem.
This task, at present, cannot be done in detail, but its outcome will af\/fect the choice of $\delta_I$.
Instead of deriving these values and their relation to states, we must resort to suf\/f\/iciently general
parameterizations to model dif\/ferent possible reductions.
If the $\delta_I$ are not adapted to a~lattice or to the common denominator of all holonomies involved,
$\hat{H}$ will not f\/ix any lattice subalgebra, even if the $\delta_I$ are f\/ixed and not state-dependent.
Lattice ref\/inement then occurs by multiplying holonomies of dif\/ferent edge lengths and obeying the
decomposition rule~\eqref{Mult}.
Evaluations of the Hamiltonian constraint, especially the non-Abelian version, become more involved and
dif\/ference equations no longer are readily available, but the property of lattice ref\/inement can be
seen already in an admittedly rough toy model.
The model is certainly far from an actual derivation from some full Hamiltonian constraint.
Instead, it is meant to illustrate how the behavior of $\delta_I$ could follow from some discrete dynamics.
Although we use the language of inhomogeneous lattice states and operators, the actual dynamics is chosen
only for illustrative purposes.
Nevertheless, there are some interesting features which may be promising for a~more faithful representation
of the loop dynamics.
Let us assume that each $\delta_I$ is always half the maximum $\lambda_I$ encountered in
a~lattice-subalgebra state acted on.
In terms of an inhomogeneous lattice, this means that every new edge generated by a~vertex contribution of
the Hamiltonian constraint would go half-way to the next vertex.
In an inhomogeneous lattice, the presentation of ref\/inement depends on the order in which individual
holonomies or vertex contributions of the Hamiltonian constraint act.
If ref\/inement proceeds regularly, staying close to cubic lattices of nearly constant link lengths, one
would expect that all plaquettes will f\/irst be split half-way along edges, and when this has happened for
all of them, one would proceed to quarters and so on.
However, in the Hamiltonian constraint all vertex contributions appear in superposition, not in
simultaneous action on a~single lattice.
To realize an ordering, one may assume that a~physical state annihilated by the constraint is expanded in
spin-lattice states according to the eigenspace of some operator such as the total volume, by the maximum
spin on all edges, or by the number of plaquettes.
Ordering spin-lattice contributions in a~physical state with respect to any of these values, plaquettes
will be f\/illed in a~certain arrangement, such as the one described.
\looseness=-1
Back in our homogeneous model, starting with a~state in some lattice subalgebra with va\-lues~$\lambda_I^{(0)}$, the f\/irst action of the Hamiltonian constraint, multiplying with holonomies of lengths~$\lambda_I^{(0)}/2$ in dif\/ferent directions, requires a~decomposition~\eqref{Mult} of the whole state,
ref\/ining edge lengths to halves.
After a~single multiplication, no homogeneous holonomy of the original length $\lambda_I^{(0)}$ will occur
explicitly, but such edges are still present in a~corresponding homogeneous lattice because there are
several matrix products $\sum_k \rho_{\lambda_I^{(0)}/2,j}(g_I)^m_k
\rho_{\lambda_I^{(0)}/2,j}(g_I)^k_n$ of two $\lambda_I^{(0)}/2$-holonomies without intermediate factors.
Taking these holonomies into account, we keep acting with $\lambda_I^{(0)}/2$-holonomies until all those
products disappear.
(In inhomogeneous lattice language, we f\/ill all $\lambda_I^{(0)}$-plaquettes with new edges and vertices
at all midpoints of the original edges.) Once these options have been exhausted, the next ref\/inement step
is due, going to $\lambda_I^{(0)}/4$ until all the previously ref\/ined plaquettes have been f\/illed.
In the process just described, we have assumed a~certain ordering of the actions of individual vertex
contributions, f\/irst f\/illing all the $\lambda_I^{(0)}$-plaquettes, then moving to
$\lambda_I^{(0)}/2$-plaquettes, and so on, as we would do on an inhomogeneous lattice.
\looseness=-1
We can relate the number of plaquettes, or the degree of ref\/inement, to geometrical quantities of the
whole lattice.
Starting with a~nearly homogeneous lattice with all edge spins $j_0$ equal, the initial area in the
$J,K$-plane is approximately $A_0=(\lambda_I^{(0)}j_0)^2 {\cal N}_J{\cal N}_K$, with ${\cal N}_J{\cal N}_K$
plaquettes in this plane and transversal links of the size $\lambda_I^{(0)}= L_I/{\cal N}_I$.
Thus, $A_0=L_I^2 j_0^2 {\cal N}_J{\cal N}_K/{\cal N}_I^2$, or $A_0\approx V_0^{2/3}j_0^2$ for a~nearly
isotropic lattice with equal edge numbers in the three directions.
When all these plaquettes have been ref\/ined after the f\/irst stage, the maximum spins have changed to
$j_1=2j_0+1/2$: multiplying $j_0$ by two because we decompose holonomies halfway according
to~\eqref{Mult}, doubling them over, and adding $1/2$ from the action of a~new holonomy in the fundamental
representation.
The added $1/2$ will soon be irrelevant when $j$ becomes larger by repeated doubling.
The area has then increased to $A_1=(\lambda_I^{(1)})^2(2j_0)^2 (2{\cal N}_J)(2{\cal N}_K)$ with
$\lambda_I^{(1)}= \lambda_I^{(0)}/2$.
Combining these equations, $A_1=4A_0$, in which only the increased spin due to ref\/inement contributes.
After $N$ steps, the same arguments show that the area has increased to $A_N=4^N A_0$, and
$\lambda_I^{(N)}= \lambda_I^{(0)}/2^N= \lambda_I^{(0)} \sqrt{A_0/A_N}$.
The spin quantum numbers increase by $j_N\approx 2^Nj_0$.
\looseness=1
It is an interesting feature that the spin of a~single action (here $1/2$), an ambiguity parameter of the
full constraint, becomes progressively less important in the model as ref\/inement proceeds, increasing
$j_N$.
The large-scale behavior is insensitive to details of the microscopic dynamics and associated ambiguities,
a~property that makes ef\/fective and mean-f\/ield viewpoints meaningful.
In this model, the edge lengths $\lambda_I^{(N)}$ are inversely proportional to the square root of the
area, or to a~linear measure of the extension of the lattice.
With near isotropy, this scaling, $\lambda=\lambda_0/a$, is of advantage for holonomy-modif\/ied dynamics,
in which holonomies $\exp(i\lambda c)=\exp(i\lambda_0 c/a)$ depend on the isotropic connection component
$c$ only in the combination $c/a\propto {\cal H}$ proportional to the Hubble parameter; the same behavior
has been proposed in~\cite{APSII} as an ad-hoc choice for the actual dynamics of loop quantum cosmology.
While $c$ may grow large even at small curvature, for instance if there is a~positive cosmological
constant, ${\cal H}$ remains small in low-curvature regimes.
The ref\/ined dynamics, with a~non-constant $\lambda$, is more well-behaved in semiclassical regimes.
In diagonal anisotropic models, the ref\/inement behavior described here implies that holono\-mies depend on
the connection by the combinations $c_I/\sqrt{|p^I|}$, as in~\cite{BianchiIBounce}, $|p^I|$ being
proportional to the area $A$ of the plane transversal to the $I$-direction.
This ref\/inement is problematic in terms of stability properties of the dif\/ference equation it
implies~\cite{SchwarzN}.
A dif\/ferent ref\/inement scheme in which $\lambda_I$ is inversely proportional to the length of the
$I$-direction is preferable~\cite{ImpBianchiI,SchwarzN, SchwarzNHol}; a~more advanced action of the
Hamiltonian constraint not f\/ixing a~lattice subalgebra would be required, a~ref\/inement scheme in which
$\delta_I$ depends not only on the length $\lambda_I$ of its own direction but also on the other two links
meeting at a~vertex.
\subsubsection{Dif\/ference equations with mean-f\/ield ref\/inement}
Lattice ref\/inement is the homogeneous realization of discrete dynamical processes in the full theory;
ideally, its form would be derived by reducing a~full Hamiltonian constraint.
A dynamical state of quantum gravity should in general be expected to have dif\/ferent lattice structures
and spacings at dif\/ferent times, or on dif\/ferent spatial slices, especially in loop quantum gravity
whose Hamiltonians are generically graph-changing.
The number of lattice sites is then a~dynamical parameter.
Indeed, if the ${\cal N}_I$ or the $\ell_I$ are kept constant~-- we always assume $L_1$, $L_2$ and $L_3$
to be constant as these are classical auxiliary parameters~-- cosmic expansion would quickly blow up the
discreteness scale, $\ell_I\sqrt{|p^1p^2p^3|}/|p^1|$ as measured in a~diagonal Bianchi geometry, to
macroscopic sizes.
Lattice ref\/inement must be a~key feature of quantum-gravity dynamics.
Dynamical minisuperspace operators such as the Hamiltonian constraint should not refer to constant ${\cal
N}_I$ or $\delta_I$, as assumed so far in~\eqref{Ham} and~\eqref{Evolve}, but to parameters that depend on
the total volume or the scale factor via an evolving discrete state.
Strict dif\/ference equations of loop quantum cosmology then do not exist, even in Abelian models, and
approximations cannot always be derived easily.
The correct evolution equation in a~triad representation would rather have to implement the changing number
of degrees of freedom, a~problem studied in other contexts as well~\cite{CanSimp,EvolvingHilbert,PH}.
Instead of working with such complicated equations, there are two approximation schemes that help to f\/ind
properties of solutions: Ef\/fective equations and dif\/ference equations in redef\/ined variables.
\begin{description}\itemsep=0pt
\item[Ef\/fective equations] describe properties of solutions of dif\/ference equations in Abelian mo\-dels
via a~non-canonical basic algebra, the discreteness implemented by using exponentials of the connection.
For instance, we would represent a~discrete degree of freedom $(c,p)$ by a~non-canonical basic pair
$(\exp(i\delta c),p)$ with a~closed linear algebra under Poisson brac\-kets.
If $\delta$ depends on $p$ by a~power law $\delta(p)=\delta_0 |p|^x$ as a~form of lattice ref\/inement,
$(\exp(i\delta_0 |p|^xc),|p|^{1-x})$ still satisf\/ies a~closed linear algebra~\cite{BounceCohStates}.
We then generate evolution by a~Hamiltonian much like~\eqref{Ham}, depending on $\exp(i\delta(p) c)$
according to the regularization chosen.
Ef\/fective quantum evolution equations then follow the general scheme of~\cite{EffCons, EffAc} and provide
approximate information about ref\/ining solutions.
While strict dif\/ference equations are not available in lattice-ref\/ining Abelian or in non-Abelian
models, ef\/fective equations can still be formulated and solved in both cases.
\item[Approximate dif\/ference equations] in re(de)f\/ined variables model ref\/ined quantum evo\-lu\-tion by
dif\/ference equations equally spaced not in the original triad eigenvalues, but rather in some redef\/ined
versions obtained as non-linear functions of them, such as power laws.
One can derive a~suitable equidistant parameter if one knows how the $\delta_I$ depend on $n_J$ in the
ref\/ining case.
Instead of an equation~\eqref{Evolve} with $n_J$-dependent increments, for instance in
$s_{n_1,n_2+2\delta_2(n_1,n_2,n_3), n_3+2\delta_3(n_1,n_2,n_3)}$, one can sometimes work with an
equidistant dif\/ference equation in re(de)f\/ined independent variables.
If $\delta_I$ depends only on $n_I$ with the same value of $I$, we def\/ine $\bar{n}_I(n_I):=\int_0^{n_I}
(\delta_I(z))^{-1}{\rm d}z$ such that $\bar{n}_I(n_I+\delta_I(n_I))= \int_0^{n_I+\delta_I(n_I)}
(\delta_I(z))^{-1}{\rm d}z= \bar{n}_I(n_I)+ \int_{n_I}^{n_I+\delta_I(n_I)} (\delta_I(z))^{-1}{\rm d}z=
\bar{n}_I(n_I)+ \delta_I(n_I)(\delta_I(n_I))^{-1}(1+O(\delta_I'(n_I)))=
\bar{n}_I(n_I)+1+O(\delta_I'(n_I))$, a~constant increment in regions in which the derivative
$\delta_I'(n_I)$ is suf\/f\/iciently small.
If $\delta_I(n_I)\propto |n_I|^x$ is a~power law with $x<0$ for ref\/inement, the equidistant approximation
is good at large~$n_I$ but not for small~$n_I$, where the quantum dynamics remains ambiguous, anyway.
(For $\delta_I\propto|n_I|^x$, we have an equidistant equation in $\bar{n}_I\propto |n_I|^{1-x}$,
corresponding to the new $p$-dependent variable used for ef\/fective equations.) One may also redef\/ine
the whole dif\/ference equation in terms of $\bar{n}_I$ with constant increments, dropping
$O(\delta_I'(n_I))$-terms as a~specif\/ic choice of factor ordering~\cite{SchwarzN}.
If $\delta_I$ depends not just on $n_I$ with the same $I$, a~redef\/inition is more complicated to derive.
If $\delta_1\delta_2\delta_3$ is proportional to a~power of $|n_1n_2n_3|$, $\delta_1\delta_2\delta_3\propto
|n_1n_2n_3|^x$ such that ref\/inement does not introduce additional anisotropy, one can always f\/ind one
equidistant variable given by
\begin{gather*}
N(n_1,n_2,n_3):=\int_0^{n_1}\int_0^{n_2}\int_0^{n_3}
(\delta_1(z_1,z_2,z_3)\delta_2(z_1,z_2,z_3)\delta_3(z_1,z_2,z_3))^{-1}{\rm d}z_1{\rm d}z_2{\rm d}z_3
\\
\hphantom{N(n_1,n_2,n_3)}{} \propto
|n_1n_2n_3|^{1-x}
\end{gather*}
related to the total volume~\cite{SchwarzN}.
This choice resembles Misner variables~\cite{Mixmaster}, which refer to the volume (or scale factor) and
two anisotropy parameters.
\end{description}
A state dependence of dynamical operators, underlying lattice ref\/inement, may seem unexpected.
After all, the dynamical operators are used to derive evolving states; how can properties of such states
enter the def\/inition of dynamical operators or the dif\/ference equations they imply? Taking reduction
seriously, it turns out that state dependence is unavoidable.
In minisuperspace models, we cannot formulate a~dynamical operator from f\/irst principles, or if we do so,
the results are fraught with minisuperspace artefacts because full properties of the discreteness are
ignored.
As described before, reduced dynamics is supposed to model the full dynamics of a~symmetric state, to be
projected back to the space of symmetric states after each application of the evolution operator, a~process
that includes the decomposition rule~\eqref{Mult} used crucially in our toy model of ref\/inement.
A minisuperspace evolution operator obtained by reduction must encode both the full Hamiltonian constraint
and properties of the projection.
The latter depends on the evolving state to be projected back on the symmetric space.
While the precise form remains complicated to determine, we see how a~state dependence of the end result is
obtained.
Without a~detailed method to perform dynamical reduction, the phase-space dependence of parameters such as
$\delta_I$ or ${\cal N}_I$ is inserted in equations only after the operator has been formulated, as a~kind
of mean f\/ield describing microscopic properties not directly accessible at the minisuperspace level.
Such details and ambiguities are relevant at small scales and at higher curvature, or in strong quantum
regimes.
These regimes can be understood only by general ef\/fects, such as quantum hyperbolicity, but away from
deep quantum regimes, ef\/fective and mean-f\/ield pictures are meaningful and useful.
\subsubsection{Ad-hoc modif\/ications in pure minisuperspace models}
\label{s:adhoc}
We have presented a~quantization of homogeneous models which has a~tight link with the full theory and,
unlike previously existing versions, applies in non-Abelian cases.
Lattice ref\/inement naturally arises as a~consequence of state-dependent regularizations as in the full
theory, combined with a~reduction of all states, including physical ones, to the space of homogeneous loop
quantum cosmology.
Lattice ref\/inement is important for consistent dynamics, for a~f\/ixed lattice expanded by cosmic
evolution would either be coarse at the present time, or would have to start at tiny spacings, orders of
magnitude below the Planck length, to be unnoticeable in current observations.
Lattice ref\/inement as a~dynamical process ensures that the discreteness scale does not need to follow
cosmic expansion; it can remain small at a~constant or slowly-changing value as macroscopic events happen
on larger regions.
With a~concrete realization of lattice ref\/inement, we can look back at minisuperspace modif\/ications
that have been proposed in the hope of obtaining appropriate dynamics, and see how justif\/ied their
assumptions are from the perspective of the new picture.
The most commonly used ad-hoc modif\/ication is a~change of classical basic variables before isotropic
minisuperspace quantization, in which $c/a\propto \dot{a}/a$ appears in holonomies, and the role of $p$ is
played by the volume.
We f\/irst note that f\/luxes necessarily result as reduced operators in the derived basic algebra, not
other powers of densitized-triad components or the volume\footnote{It is possible to construct f\/lux
operators from the volume operator, viewing the latter as some kind of basic operator~\cite{Flux}.
However, for the present purposes one cannot substitute the volume for f\/luxes because no linear basic
algebra would result for the def\/inition of quantum representations and their averaging and reduction.}.
Basic operators or linear functions of them are directly reduced by reference to the commutation result of
Section~\ref{s:comm}.
Non-linear functions such as the volume, on the other hand, are more complicated to average or reduce
exactly, with no currently known procedure to do so\footnote{Moreover, the volume operator usually used in
homogeneous models, and also here, is a~simplif\/ied version of the cubic SU(2)-invariant of the full
theory.
The assumed simplif\/ication of the much more complicated full spectrum does not follow from reduction but
is put in by hand.
When details of the eigenvalues are important, for instance when one uses the volume as the independent
variable of dif\/ference equations, the simplif\/ied spectrum could lead to additional artefacts, not
covered by the methods of this article.}. The basic holonomy operators therefore act by shifts on the f\/lux
spectrum by constant amounts (of $p$ in isotropic models), not the volume spectrum.
If constant shifts of the volume spectrum have dynamical advantages, as in the model of~\cite{APSII}, they
cannot be derived by direct use of basic operators but only after re(de)f\/ining variables as in the
preceding subsection.
The volume can be used as a~basic variable only as a~modif\/ication within a~pure minisuperspace
quantization, without reduction and a~justif\/ied analog in the full theory.
\looseness=-1
The modif\/ications proposed in~\cite{APSII} have been motivated in holonomy-based expressions for
$F_{ab}^i$ in the constraint by referring to geometrical areas $a^2\ell^2$ instead of coordinate ones
$\ell^2$, where $a$ is the scale factor of a~Friedmann--Lema\^{\i}tre--Robertson--Walker model to be
quantized.
However, just as tensor components $F_{ab}^i$ depend on coordinates, it is the coordinate area $\ell^2$
which should be used in the expansion~\eqref{HolExp}, not geometrical areas obtained using the metric or
densitized triad.
(Contracting $F_{ab}^i$ with the two vector f\/ields provides a~scalar.
However, for the coordinate area $\ell^2$ to be the correct factor in the expansion, the vector f\/ields
must be normalized using a~background metric.
Changing coordinates and retaining normalization then makes the contracted version transform.)
If these and other ad-hoc assumptions, for instance about factor ordering, are dropped, dynamical equations
are much more ambiguous than usually realized or admitted.
More-involved constructions of lattice ref\/inement are required, which capture necessary projections of
the dynamical f\/low back on the space of symmetric states.
Exact projections being largely unknown, the dynamics can be obtained only in parameterized ways,
faithfully taking into account ambiguities~\cite{InhomLattice,CosConst}.
At this dynamical stage, the construction of minisuperspace operators currently proceeds by analogy with
full operators, not by derivation.
\subsection{Quantum-geometry corrections}
Using holonomies instead of curvature or connection components implies quantum-geometry corrections in the
dynamics.
There is a~second type of ef\/fect, called inverse-triad correction, which comes from the fact that an
inverse of the densitized triad appears in the Hamiltonian constraint of gravity and in matter
Hamiltonians, but f\/lux operators have discrete spectra containing zero.
No inverse f\/lux operators exist, but the inverse densitized triad can be quantized to a~densely def\/ined
operator using classical rewritings following~\cite{QSDI,QSDV}.
In the Hamiltonian constraint, inverse-triad operators appear in the gravitational part (giving rise to
dif\/ferences of volume eigenvalues in~\eqref{Evolve}) and in matter Hamiltonians.
Holonomy corrections are controlled by the parameters $\lambda_I$, or by the values $\delta_I$ chosen for
a~constraint operator.
Inverse-triad operators entering~\eqref{Ham} via $|\rho_{\delta_K,1/2}(g_K)
[\rho_{\delta_K,1/2}(g_K)^{-1},\hat{V}]|$ are built using the same type of holonomies, and so their
corrections refer to the same lattice scales $\delta_I$ as holonomy corrections.
Both corrections are therefore linked to each other, and comparing the explicit forms of corrections allows
one to estimate which one might be dominant in a~given regime.
Like holonomy corrections, the size of inverse-triad corrections depends on the values of $\delta_I$ and
requires a~proper consideration of lattice structures.
However, there is an additional operator, the volume $\hat{V}$, used crucially in the def\/inition of
inverse-triad operators as commutators.
For this operator, the same question must be asked as for holonomies, namely what lattice scale it refers
to.
In a~local lattice picture, as in the full theory, one should expect the relevant volume to be the one
associated with a~single lattice site or a~spin-lattice vertex, just as the holonomies used correspond to
single lattice links.
However, incorporating the volume in this way is not as obvious as for holonomies, and so dif\/ferent
versions have been considered, making use of macroscopic volumes~\cite{ScalarHolEv} or even one associated
with the artif\/icial integration region ${\cal V}$.
In this subsection, we derive in detail the form of inverse-triad operators and the corrections they imply.
To simplify commutator calculations involved, we will present the main equations for Abelian models and
brief\/ly comment on non-Abelian ef\/fects later.
\subsubsection{Local and non-local lattice operators}
Working with lattice spin-network states, one can def\/ine dif\/ferent f\/lux operators which all give rise
to the same f\/lux when averaged to minisuperspace operators.
This situation complicates constructions in pure minisuperspace models and has led to considerable
confusion.
Only relating models to the full theory, completing the kinematical reduction, can solve these issues.
\begin{description}\itemsep=0pt
\item[Local lattice operators:] We begin with the local f\/lux operator, able to show any inhomo\-genei\-ty
realized in the lattice model: $\hat{F}_{v,I}$, taken for a~plaquette transversal to a~sur\-fa\-ce~$X^a_I$ and
intersecting only one edge $e_{v,I}$ starting at the vertex~$v$.
We choose the surface to be a~square of coordinate area $\ell_J\ell_K=\lambda_J\lambda_KL_JL_K$, so that we
can view $\hat{F}_{v,I}$ as a~quantization of the classical $\lambda_J\lambda_Kp^I(v)$, where $p^I(v)$ is
an inhomogeneous diagonal component making the homogeneous variables position dependent.
The conjugate variable $\lambda_Ic_I$ is quantized via local holonomies $h_{v,I}=\exp(i\int_{e_I} c_I{\rm
d}s)$.
(Recall our Abelian simplif\/ication in this subsection.) These local lattice operators satisfy the
commutator algebra
\begin{gather}
\label{Local}
[\hat{h}_{v,I},\hat{F}_{v',I'}]=-8\pi\gamma\ell_{\rm P}^2\delta_{v,v'}\delta_{I,I'}\hat{h}_{v,I}.
\end{gather}
\item[Minisuperspace operators:] If each surface used for local f\/lux operators is centered at the
intersection point with $e_{v,I}$, the union of all those that have the same $I$-coordinate as $v$ form
a~surface stretching through the whole integration region ${\cal V}$, without overlap of non-zero measure.
Including an average in the transversal direction, we can view the lattice sum
$\hat{\overline{p}}{}^I={\cal N}_I^{-1}\sum_v \hat{F}_{v,I}$ according to~\eqref{DerivAv} as the
f\/lux quantizing the minisuperspace variable $p^I=L_JL_K\tilde{p}^I$.
Its conjugate variable $c_I=L_I\tilde{c}_I$ in minisuperspace is quantized by holonomies, $h_I=\exp(i c_I)$
for an edge stretching through the whole integration region in direction $X^I_a$.
We have the commutator
\begin{gather}
\label{MiniAlg}
[\hat{h}_I,\hat{p}^J]=-8\pi\gamma\ell_{\rm P}^2\delta_I^J\hat{h}_I
\end{gather}
for minisuperspace operators, correctly quantizing~\eqref{PoissonAbel}.
\item[Non-local operators:] There is a~version of operators between local lattice and minisuperspace ones.
We can average local lattice f\/luxes $\hat{F}_{v,I}$ over the lattice rather than sum as in
$\hat{\overline{p}}{}^I$, or reduce the size of the minisuperspace f\/lux $\hat{\overline{p}}{}^I$ by
dividing by the number of vertices in a~surface, and def\/ine
\begin{gather}
\label{Mini}
\widehat{\overline{F_I}}=\frac{1}{{\cal N}_1{\cal N}_2{\cal N}_3}\sum_{v}\hat{F}_{v,I}=\frac{1}{{\cal N}
_J{\cal N}_K}\hat{\overline{p}}{}^I.
\end{gather}
This f\/lux operator refers to the lattice spacing but, via averaging, includes all lattice sites in the
integration region.
We will call it the non-local f\/lux operator.
With a~local holonomy, it obeys the commutator relation
\begin{gather}
\label{LocalNonLocal}
[\hat{h}_{v,I},\widehat{\overline{F_J}}]=-\frac{8\pi\gamma\ell_{\mathrm P}^2}{{\cal N}_1{\cal N}_2{\cal N}_3}\delta_{IJ}
\hat{h}_{v,I}.
\end{gather}
In~\eqref{LocalNonLocal}, the number ${\cal N}_1{\cal N}_2{\cal N}_3$ of lattice sites in a~region ${\cal
V}$ replaces the coordinate vo\-lu\-me~$V_0$ of~\eqref{PoissonAbel}.
At a~technical level, $1/{\cal N}_1{\cal N}_2{\cal N}_3$ comes about as the product of $1/{\cal N}_J{\cal
N}_K$ in the plane average~\eqref{Mini}, and a~factor of $1/{\cal N}_I$ because only one out of ${\cal
N}_I$ lattice links along direction $I$ provides a~non-zero commutator
$[\hat{h}_{v,I},\hat{F}_{v',I}]\propto \delta_{v,v'}$ according to~\eqref{Local}.
\end{description}
It may seem questionable to use local holonomies and non-local f\/luxes within the same
setting~\eqref{LocalNonLocal}, but a~consistent and closed algebra of basic operators is obtained in this
way (provided the ${\cal N}_I$ are f\/ixed).
Whether such operators are meaningful physically is another question which we will soon discuss.
For now, our motivation for looking at such a~mix of local and non-local operators is that it has been used
(implicitly or explicitly) in several proposals to formulate inverse-triad corrections.
{\sloppy Properties of basic operators in the dif\/ferent algebras can be translated into one another and are
mutually consistent.
If $|(\mu_I)\rangle$ denotes non-local f\/lux eigenstates with $\widehat{\overline{F_J}}|(\mu_I)\rangle=
8\pi\gamma\ell_{\mathrm P}^2 \mu_J|(\mu_I)\rangle$, the holonomy-f\/lux algebra~\eqref{LocalNonLocal} determines the
action $\hat{h}_{v,I}|(\mu_I)\rangle= |(\mu_I+1/{\cal N}_1{\cal N}_2{\cal N}_3)\rangle$ of local holonomies.
Constant shifts of f\/lux eigenvalues result for a~f\/ixed lattice.
The quantized densitized-triad component $p^I=L_JL_K\tilde{p}^I$ is obtained from the averaged f\/lux as
$\hat{\overline{p}}{}^I={\cal N}_J{\cal N}_K \widehat{\overline{F_I}}$.
Its eigenvalues change under the action of a~basic holonomy operator by $p^I= 8\pi\gamma\ell_{\mathrm P}^2 {\cal
N}_J{\cal N}_K \mu_I \mapsto 8\pi\gamma\ell_{\mathrm P}^2 {\cal N}_J{\cal N}_K (\mu_I+ 1/{\cal N}_1{\cal N}_2{\cal
N}_3)= p^I+1/{\cal N}_I$.
This dependence of the constant shift on ${\cal N}_I$ is consistent with the form $\exp(i c_I/{\cal N}_I)$
of local holonomies $\exp(i\ell_I\tilde{c}_I)$, with $c_I=L_I \tilde{c}_I$, $\ell_I/L_I= 1/{\cal N}_I$, and
$\{c_I,p^J\}= 8\pi\gamma G \delta_I^J$.
Notice the dif\/ferent behaviors of the non-local averaged f\/lux $\widehat{\overline{F_I}}$ and the
minisuperspace densitized-triad component $\hat{\overline{p}}{}^I$, corresponding by its factor of $L_JL_K$
to the f\/lux through a~complete plane in the region~${\cal V}$.
}
The general form of the algebra of basic operators does not depend much on whether the local~$\hat{F}_{v,I}$,
the non-local~$\widehat{\overline{F_I}}$ or the minisuperspace $\hat{p}^I$ is used; the latter two dif\/fer
from each other just by constant factors at the kinematical level (disregarding lattice ref\/inement).
However, $\widehat{\overline{F_I}}$ and~$\hat{p}^I$ are much less local than~$\hat{F}_{v,I}$ and therefore
unsuitable for local expressions such as quantized Hamiltonians or inverse-triad corrections.
\subsubsection{Inverse-triad corrections}
The gravitational part of the Hamiltonian constraint contains a~factor of $\epsilon^{ijk}\epsilon_{abc}
E^b_jE^c_k/\sqrt{|\det E|}$ in which one divides by the determinant of $E^a_i$, and similar terms occur in
matter Hamiltonians.
Flux operators and the volume operator having zero in their discrete spectra, no densely def\/ined inverse
exists to quantize $1/\det E$ directly.
Instead, one makes use of the classical identity~\cite{QSDI}
\begin{gather}
\label{InvClass}
2\pi\gamma G{\rm sgn}(\det E)\frac{\epsilon^{ijk}\epsilon_{abc}E^b_jE^c_k}{\sqrt{|\det E|}}
=\left\{A_a^i,\int\sqrt{|\det E|}{\rm d}^3x\right\}
\end{gather}
and quantizes the Poisson bracket to a~commutator of the form
\begin{gather}
\label{InvQuant}
\frac{1}{i\hbar}\left(\hat{h}_e^{-1}[\hat{h}_e,\hat{V}]-\hat{h}_e[\hat{h}_e^{-1},\hat{V}]\right)=\frac{1}
{i\hbar}\left(\hat{h}_e\hat{V}\hat{h}_e^{-1}-\hat{h}_e^{-1}\hat{V}\hat{h}_e\right).
\end{gather}
In a~lattice model, holonomies refer to lattice links, or $\rho_{\delta,j}(g_I)$ in their reduction.
The vo\-lu\-me operator is expressed via f\/luxes, and here local f\/lux operators are used, given the local
form of the classical Poisson bracket in~\eqref{InvClass} and of the commutator in~\eqref{InvQuant} which
depends only on vertex contributions to $\hat{V}$ lying on the edges used in $\hat{h}_e$.
At this stage, minisuperspace models can easily become misleading because their most immediate f\/lux
operators~$\hat{p}^I$ or~$\hat{\overline{p}}{}^I$, proportional to $\hat{\overline{F_I}}$ are non-local.
The wrong form and size of inverse-triad ef\/fects then results.
We now present the detailed derivation of inverse-triad corrections based on local f\/lux operators, as
in~\cite{Springer,InflTest}, and then show how non-local versions dif\/fer.
The simplif\/ied volume operator of Abelian models is $\hat{V}=\sum_v
|\hat{F}_{v,1}\hat{F}_{v,2}\hat{F}_{v,3}|^{1/2}$, summed over all vertices of a~spin-lattice state.
In expressions such as~\eqref{InvQuant}, it suf\/f\/ices to look at contributions from all lattice-aligned
$\hat{h}_{v,I}$.
A~single such commutator is then
\begin{gather*}
\hat{I}_{v,I}
=\frac{\big|\hat{h}_{v,I}^{\dagger}\hat{V}\hat{h}_{v,I}-\hat{h}_{v,I}\hat{V}\hat{h}_{v,I}^{\dagger}\big|}
{8\pi\gamma G\ell_{\rm P}^2}=\frac{\left|\hat{h}_{v,I}^{\dagger}\sqrt{|\hat{F}_{v,I}|}\hat{h}_{v,I}
-\hat{h}_{v,I}\sqrt{|\hat{F}_{v,I}|}\hat{h}_{v,I}^{\dagger}\right|}{8\pi\gamma G\ell_{\rm P}^2}
\sqrt{|\hat{F}_{v,J}\hat{F}_{v,K}|}
\end{gather*}
to \looseness=1 be summed over all $I$.
(Classically, the combination of holonomies and f\/luxes corresponds to $|F_{v,I}|^{-1/2}
\sqrt{|F_{v,J}F_{v,K}|}$.
We have used an absolute value around the commutator as in~\eqref{Ham}.) For Abelian holonomies it is easy
to simplify the inverse-triad operator, making use of the commutator $[\hat{h}_{v,I}, \hat{F}_{v,I}]=
-8\pi\gamma\ell_{\rm P}^2 \hat{h}_{v,I}$ from~\eqref{Local} and the reality condition
$\hat{h}_{v,I}^{\dagger}\hat{h}_{v,I}=1$.
Commuting holonomies past f\/lux operators then gives $\hat{h}_{v,I}^{\dagger}|\hat{F}_{v,I}|^{1/2}
\hat{h}_{v,I}= |\hat{F}_{v,I}+8\pi\gamma\ell_{\rm P}^2|^{1/2}$, and therefore
\begin{gather}
\label{Inv}
\hat{I}_{v,I}=\frac{\left|\sqrt{|\hat{F}_{v,I}+8\pi\gamma\ell_{\rm P}^2|}-\sqrt{|\hat{F}_{v,I}
-8\pi\gamma\ell_{\rm P}^2|}\right|}{8\pi\gamma G\ell_{\rm P}^2}\sqrt{|\hat{F}_{v,J}\hat{F}_{v,K}|}.
\end{gather}
In strong quantum regimes, non-Abelian features should be relevant~\cite{BoundFull} and inverse-triad
ef\/fects compete with holonomy and higher-curvature terms; however, the form~\eqref{Inv} still plays
a~characteristic role in ef\/fective actions~\cite{Action}.
The expression~\eqref{Inv} is a~good approximation in perturbative settings with
$F_{v,I}>8\pi\gamma\ell_{\rm P}^2$, where it may be used to estimate
qualitative ef\/fects or potential observational tests~\cite{LoopMuk,InflConsist,InflTest}.
Since inverse-triad operators are local~-- commutators $\hat{h}_e[\hat{h}_e^{-1},\hat{V}]$ provide
contributions only for vertices on $e$ even if the volume operator for the full region ${\cal V}$ is
used~-- their commutators refer to local $\hat{F}_{v,I}$ in $\hat{V}=\sum_v
|\hat{F}_{v,1}\hat{F}_{v,2}\hat{F}_{v,3}|^{1/2}$, not to the minisuperspace operator $\hat{p}^I$ or the
non-local $\widehat{\overline{F_I}}$.
Inverse-triad corrections therefore depend on $F_{v,I}\pm 8\pi\gamma\ell_{\mathrm P}^2$, where the Planckian addition
can easily be a~signif\/icant contribution to the eigenvalue or expectation value of $\hat{F}_{v,I}$, the
f\/lux through an elementary lattice site\footnote{In fact, if $F_{v,I}$ is Planckian, with lattice spins
near $1/2$ for the fundamental representation, as often assumed, inverse-triad corrections are large.
Geometry must be suf\/f\/iciently excited above fundamental spins (some kind of ground state) for good
semiclassical states to result.}. Had we used the average $\widehat{\overline{F_I}}$, the algebra would have
led us to $\overline{F_I}\pm 8\pi\gamma\ell_{\mathrm P}^2/{\cal N}_I{\cal N}_J{\cal N}_K$, with corrections not only
much suppressed by dividing by the large number of lattice sites but also depending on the size of the
arbitrary region ${\cal V}$ chosen\footnote{Sometimes, it is suggested to take a~limit of $V_0\to\infty$,
or ${\cal N}_I\to\infty$, viewing a~f\/inite $V_0$ as a~regulator.
The procedure removes any ${\cal V}$ dependence and makes inverse-triad corrections disappear.
However, as discussed in more detail in the next section, this reasoning is misguided: $V_0$ is not
a~regulator because its value does not at all af\/fect the classical theory.
Classical models with dif\/ferent $V_0$ produce the same physics, and so they should all be quantizable,
without an ef\/fect of $V_0$.
Moreover, the limit of ${\cal N}_I\to\infty$ is not consistent with the basic algebra of averaged
operators.}. Such operators would be incorrect; they are based on the confusion of the correct average
$\overline{\sqrt{F_I}}$ with the non-local $\sqrt{\overline{F_I}}$.
\subsubsection{Local quantum corrections}
We have distinguished three types of constructions for composite operators quantizing a~symmetric model:
the minisuperspace treatment using $\hat{h}_I=\widehat{\exp(i c_I)}$ and $\hat{p}^J$ with
algebra~\eqref{MiniAlg}, chimerical constructions with (local) link holonomies $\hat{h}_{v,I}$ but
non-local f\/luxes $\widehat{\overline{F_J}}$ with algebra~\eqref{LocalNonLocal}, and f\/inally local
lattice operators built from $\hat{h}_{v,I}$ and $\hat{F}_{v,J}$ with algebra~\eqref{Local}.
Local and minisuperspace treatments dif\/fer from each other by the order in which reduction and
composition of operators are done.
In non-local models, as in traditional minisuperspace versions, one f\/irst postulates or derives the
reduced basic operators $\hat{h}_I$ and $\widehat{\overline{F_J}}$ (or $\hat{\overline{p}}{}^J$) and their
algebra, and in a~second step constructs composite operators of the form ${\cal O}_{\text{non-local}}(\hat{h}_I,\widehat{\overline{F_J}})$ from them by simple insertions, following analogous steps
taken in the full theory.
In local quantizations, one f\/irst constructs operators ${\cal O}_{\rm
local}(\hat{h}_{v,I},\hat{F}_{w,J})$, adapting the full techniques to lattice states, and then restricts
them to a~quantized homogeneous model.
The second, local method is more complicated because it must deal with the reduction of non-basic,
composite operators or their averaging.
Tractable techniques exist only in rare cases, and therefore the main ef\/fects, for instance in the
Hamiltonian constraint, are incorporated by parameterizations or mean-f\/ield techniques as in
Section~\ref{s:RefToy}~-- an unsurprising feature given that local methods are analogous to
a~transition from microscopic Hamiltonians to tractable models of large-scale ef\/fects in condensed-matter
physics.
Despite technical dif\/f\/iculties, the local viewpoint has several clear advantages: it produces the
correct sizes of quantum corrections and naturally gives rise to lattice ref\/inement.
As already noted, the misrepresentation of quantum corrections in non-local models can easily be seen for
inverse-triad operators, or the key ingredient ${\cal O}=|F_I|^{1/2}$.
Non-local operators make use of the averaged f\/lux before taking the square root, quantizing ${\cal O}$ as
$\hat{{\cal O}}_{\text{non-local}}=|\widehat{\overline{F_I}}|^{1/2}$.
A local quantization, by contrast, leads to $\hat{{\cal O}}_{\rm local}= \overline{|\hat{F}_{v,I}|^{1/2}}$,
the over-line now indicating restriction to homogeneous states after taking the square root.
While linear combinations of the basic operators commute with averaging (Section~\ref{s:comm})~--
producing similar-looking basic algebras in local and non-local versions~-- non-linear combinations do not.
In non-linear combinations of the basic operators, drastic deviations between local and non-local operators
can therefore result, but only the local version correctly captures properties of the full theory in which
no averaging is done.
Local composite operators can be formulated only when the full lattice structure is taken into account, but
after reduction they refer to reduced degrees of freedom as suitable for a~quantization of a~classically
reduced symmetric model.
Minisuperspace and non-local operators may be formally consistent without direct reference to a~lattice,
provided one chooses the length parameter $\ell_I=\delta_IL_I$ of holonomies in some way, for instance
related to the Planck length or to full area eigenvalues, but at this stage the models become ad-hoc.
Moreover, in spite of their formal consistency, non-local composite operators do not provide the correct
form of corrections in operators that refer to f\/luxes or the volume, most importantly inverse-triad
corrections.
In addition to making inverse-triad corrections sizeable and interesting, the local treatment, taking into
account inhomogeneity, has another implication regarding the physical evaluation of models.
There is just one set of parameters, $\delta_I$ or equivalently ${\cal N}_I=1/\delta_I$, which determines
the magnitude of holonomy {\em and} inverse-triad corrections.
It is not possible to ignore one of the corrections and focus only on the other, unless one can show that
a~regime of interest leads to values of $\delta_I$ that make one correction dominate the other.
In general, the two corrections are not strictly related to each other, because holonomy corrections are
sensitive to the classical curvature scale relative to the Planck density, while inverse-triad corrections
are sensitive to the local discreteness scale $F_{v,I}$ relative to the Planck area, as seen in~\eqref{Inv}.
A detailed, state-dependent analysis, taking into account inhomogeneous quantum geometry, is required to
estimate both corrections.
\section{Limitations of minisuperspace models}
\label{s:limit}
Minisuperspace models of quantum cosmology never provide exact solutions to full quantum gravity.
In some cases, deviations can be strong, for instance when unstable dynamics of neglected degrees of
freedom (a classical property) enlarges the mismatch between symmetric and less-symmetric solutions, which
at an initial time may have been the consequence only of a~mild violation of uncertainty
relations~\cite{MiniValid}.
The discreteness of loop quantum cosmology shows a~much larger class of minisuperspace limitations, for
discreteness is not easily reconciled with homogeneity.
As always, such limitations should be pointed out and discussed in detail, not to slander but to warn.
At the level of states and basic operators, homogeneous wave functions can be derived in precise terms,
using the distributional constructions of~\cite{SymmRed}, as recalled in Section~\ref{s:dist}.
Intuitively, averaging a~discrete state over a~continuous symmetry group cannot result in a~normalizable
wave function in the original Hilbert space (or even a~meaningful density matrix), but it is well-def\/ined
as a~distribution.
At this level, discreteness is not problematic and does not introduce ambiguities.
At the dynamical stage, however, discrete space-time structures with possible ref\/inement or (even if
there is no ref\/inement) reference to the local discreteness scale are more complicated and more
ambiguous, as occasionally pointed out well before loop quantum cosmo\-lo\-gy was introduced~\cite{River,UnruhTime, Weiss}.
Loop quantum cosmology has provided means to analyze such situations.
\subsection{Parameters}
\label{s:Param}
In addition to phase-space variables, a~strict minisuperspace model has only the parameter $V_0=L_1L_2L_3$
to refer to, appearing in the symplectic structure~\eqref{Poisson}.
There is no analog of $\ell_I=\lambda_IL_I= L_I/{\cal N}_I$, the discrete lattice scales.
And yet, the local quantum dynamics of the full theory, together with the quantum corrections it implies,
depend on the parameters $\lambda_I$ via holonomies around loops used to quantize $F_{ab}^i$ or along edges
used in commutators with the volume operator to quantize inverse triads.
It may not be obvious how exactly edge lengths $\lambda_IL_I$ enter quantum corrections, owing to a~certain
conceptual gap between the coordinate dependent $L_I$ or $\ell_I$ and geometrical aspects in this
background-independent formulation, as well as quantization ambiguities.
But quantum corrections certainly cannot depend on $V_0$, which is chosen at will (the coordinate size of
a~region used to reduce the symplectic structure) and knows nothing about the discrete scale.
If $V_0$ or any of its factors $L_I$ appears in quantum corrections, an artif\/icial dependence on
coordinates and the chosen region results, as well as wrong sizes of quantum ef\/fects.
The authors of~\cite{APSII} proposed to modify the strict minisuperspace dynamics in a~fashion that
successfully eliminates the $V_0$-dependence at least in holonomy corrections.
(Inverse-triad corrections could not be represented meaningfully in this scheme.) As a~consequence in
isotropic models, the Hubble parameter rather than $c=\gamma\dot{a}$ appears in holonomies, and the
discrete dynamics proceeds by constant steps of the volume $V_0a^3$, not of the densitized triad
$V_0^{2/3}p$.
Heuristically, as recalled in Section~\ref{s:adhoc}, one can argue for this scheme by identifying
geometrical areas $a^2\ell^2$, instead of coordinate areas $\ell^2$, with the Planck area when specifying
the size of holonomy modif\/ications.
Holonomy corrections depending on the Hubble parameter $\dot{a}/a$ rather than $\dot{a}$ have the advantage
of being easily coordinate independent.
If the Planck length~-- or a~parameter close to it such as the smallest non-zero area eigenvalue of the
full theory~-- is chosen as the discrete scale, modif\/ications $\sin(\ell_{\mathrm P} {\cal H})/\ell_{\mathrm P}$ of ${\cal H}$
result, independent of coordinates and of $V_0$.
Holonomy corrections then refer simply to the curvature radius relative to the Planck length (or, via the
Friedmann equation, the density scale relative to the Planck density), a~parameter which can easily be
estimated in regimes of interest.
As a~general scheme, however, the procedure suf\/fers from several problems:
\begin{enumerate}\itemsep=0pt
\item If the volume is used as a~basic variable, following~\cite{APSII}, one introduces a~sign choice by
hand, allowing all real values for $v=\pm V_0a^3$.
(Otherwise, $i\partial/\partial V$ is not essentially self-adjoint, and $\exp(t \partial/\partial V)$ not
unitary.
One would have to use the methods of af\/f\/ine quantum gravity~\cite{AffineQG} for acceptable
quantizations, but it has not been shown that this can be compatible with the use of holonomies.) In
f\/luxes, the sign appears automatically thanks to the orientation of triads~\cite{IsoCosmo}, and
$\exp(t\partial/\partial p)$ is unitary.
\item The Planck length in $\sin(\ell_{\rm P}{\cal H})/\ell_{\rm P}$ enters by a~mere postulate, which
cannot be avoided because the quantization, still at the minisuperspace-level, does not have access to
discrete structures.
One may use the full area spectrum to guess what the scale might be, but such a~procedure leaves open the
question of what structure or eigenvalues a~dynamical discrete state might give rise to.
Moreover, the minisuperspace area or volume spectrum does not have a~smallest non-zero eigenvalue.
The spectrum, seen in~\eqref{Deriv}, is discrete, with all eigenstates normalizable, but on the
non-separable kinematical Hilbert space the spectrum still amounts to a~continuous set of numbers as
eigenvalues.
One has to go slightly beyond minisuperspace models by referring to the full area spectrum, which does have
a~smallest non-zero eigenvalue, but still no reduction is performed.
In this way, the scheme becomes improvised, heuristic, and ad-hoc.
\item Going beyond strict minisuperspace quantizations is not a~bad thing; in fact, it is required for
realistic modeling.
However, schemes following~\cite{APSII} do not provide justif\/ications for the detailed way in which one
tries to go beyond.
Moreover, while they give rise to meaningful results for holonomy corrections, inverse-triad corrections
from~\eqref{Inv} are not modeled properly.
These corrections depend on the ratio of the discreteness scale $|\langle\hat{F}\rangle|$ to the Planck
area.
If one assumes that the discreteness scale is exactly the Planck area, inverse-triad corrections would
merely result in a~constant factor, not af\/fecting the dynamics much at f\/irst sight.
But the factor dif\/fers from one, and it has dynamical ef\/fects even if it changes just slightly.
In other attempts, $|\langle\hat{F}\rangle|$ was related to macroscopic areas~\cite{ScalarHolEv}, sometimes
even involving $V_0$, for instance by using areas related to the size of the re\-gion~${\cal V}$.
These proposals ignore the fact that there is only one discrete structure that both holonomy and
inverse-triad corrections can refer to, as well as their local nature.
\item To counter inappropriate references to $V_0$ in non-local quantizations, the parameter is sometimes
treated as a~regulator to be sent to inf\/inity after quantization.
Such a~formal limit would undo all inverse-triad corrections, leaving only the classical inverse in
dyna\-mi\-cal equations.
However, the limit does not exist at the level of operators~-- if it existed, it would result in an
inverse of the triad operator, which is not densely def\/ined.
One can perform the limit at the level of the dif\/ference equation for wave functions, or in ef\/fective
equations.
But while this is formally possible, the overall quantization procedure would no longer be coherent.
After all, Abelian loop quantum cosmology has dif\/ference equations for states because curvature is
replaced by holonomies, resulting in a~true modif\/ication $\sin(\ell_I {\cal H})/\ell_I$ for ${\cal H}$.
The limit $\ell_I\to0$ or $\lambda_I=1/{\cal N}_I\to 0$, which would send holonomy operators to derivatives
by $p$, does not exist at the operator level.
The Hamiltonian constraint is quantized to a~dif\/ference operator with non-zero step-size.
At the level of wave equations, acting with the operator on states in the triad representation, the limit
does exist and produces a~version of the Wheeler--DeWitt equation~\cite{SemiClass}.
If one insists on re\-moving the ``regulator'' $V_0$ by sending it to inf\/inity at the level of wave
equations, one should also remove the true regulators $\ell_I$ in holonomies at the same level.
The dynamics of loop quantum cosmology would then be no dif\/ferent from Wheeler--DeWitt dynamics.
In the new homogeneous quantization of this article, the limit $L_I\to\infty$ or $V_0=L_1L_2L_3\to\infty$
is impossible at f\/ixed $\delta_I$, because $g_I=\exp(L_I\tilde{\phi}(T_I))$ has no such limit.
For edge lengths $\ell_I=\delta_IL_I$ in $\rho_{\delta_I,j_I}(g_I)$ to remain f\/inite, one would have to
take the limit $\delta_I\to0$ simultaneously with $L_I\to\infty$, but then the dif\/ference equation would
become a~dif\/ferential equation, and loop quantum cosmology would, again, reduce to Wheeler--DeWitt
quantum cosmology.
Instead, one must be able to derive models for arbitrary values of $L_I$, such that observables are
independent of them.
\item The parameter $V_0$, in contrast to $\delta_I$ in holonomy modif\/ications, is not a~regulator
because it does not modify the classical theory.
Classical models can be formulated with all f\/inite choices of~$V_0$, producing the same dynamics and
observables.
(Dif\/ferent choices of~$V_0$ to some degree resemble dif\/ferent normalizations of the scale factor.
Rescaling~$V_0$ is not a~canonical transformation as it changes the symplectic structure~\eqref{Poisson}.
Classically, this is not a~problem, but one cannot expect a~unitary transformation at the quantum level,
making the issue in quantum theory more complicated.) It should then be possible to formulate also quantum
dynamics for all possible choices, or else~$V_0$ would acquire more physical meaning than it deserves.
Another problematic feature of regularization attempts is a~possible topology dependence.
If one looks at a~model of closed spatial slices, for instance the FLRW model with positive curvature, one
cannot send~$V_0$ to inf\/inity.
Instead, it may seem natural to use the full spatial coordinate volume as a~distinguished value of~$V_0$.
(But again, one may equally well formulate the classical dynamics with dif\/ferent values of~$V_0$,
choosing dif\/ferent coordinates on the unit 3-sphere or integration regions smaller than the whole
sphere.) It is sometimes argued that some ef\/fects, such as inverse-triad corrections, are meaningful or
non-zero only with closed spatial topologies, but not for the f\/lat, non-compactif\/ied FLRW model.
Not surprisingly for a~quantization based on non-local f\/luxes, quantum dynamics would then suf\/fer from
a~strong violation of locality, depending on the global spatial topology even in its elementary changes.
Such models in cosmology would also be hard to test empirically.
One would have to know the spatial volume~-- and whether it is compact or not~-- before one can estimate
quantum ef\/fects and make predictions.
A more detailed discussion is given in~\cite{Springer}.
\end{enumerate}
The many conf\/licting comments that can be found in the literature following~\cite{APSII}, for instance
regarding the size of inverse-triad corrections, attest to the complicated and incomplete state of
af\/fairs in this scheme.
Sometimes, a~single paper may claim that inverse-triad corrections are too small to be signif\/icant, and
at the same time can be changed at will by tuning the value of $V_0$.
Although it is not always realized by all authors, such conf\/licting statements spell out limitations of
pure minisuperspace models.
\subsection{Parameterizations}
By introducing ``holonomies'' as functions of the Hubble parameter rather than the connection component,
one mimics an $a$-dependent $\ell_I=\lambda_IL_I\propto 1/a=1/\sqrt{|p|}$.
As $a$ or $p$ changes and the universe expands or contracts, the lattice spacing evolves.
Although there is no explicit creation of new vertices, the number ${\cal N}$ of lattice sites must change,
for a~f\/ixed $V_0$ with changing $\lambda_I$ implies an evolving ${\cal N}=1/\lambda_1\lambda_2\lambda_3$.
Deriving a~precise functional form for $\lambda_I(p^J_i)$ would require one to formulate a~correspondence
between full discrete dynamics and reduced minisuperspace dynamics, including projections of evolved states
onto the space of symmetric states.
Lacking such complicated constructions, one can use phenomenological input to restrict possible forms of
$\lambda_I(p^J_j)$, or $\lambda_I(p^J)$ in diagonal anisotropic models, for instance dif\/ferent exponents
of power-laws $\lambda(p)\propto |p|^x$ in isotropic models with a~real parameter $x$~\cite{InhomLattice}.
If $\lambda$ is constant ($x=0$, corresponding to~\cite{IsoCosmo}), the discreteness scale would be
magnif\/ied by cosmic expansion, presumably making it noticeable in observations.
Since no discreteness has been seen, $x=0$ or values close to it are ruled out.
The suggestion of~\cite{APSII} amounts to $x=-1/2$, with constant discreteness scale, and is compatible
with observations.
However, a~constant discreteness scale is not in agreement with full constraint operators changing vertex
structures and local volume values, not just the number of vertices.
On average over many individual actions of the Hamiltonian constraint and on large scales, cosmic
minisuperspace dynamics may be close to $x=-1/2$ as in the toy model presented in
Section~\ref{s:RefToy}, but this value cannot be realized precisely.
The choice of $x=-1/2$ (or its generalization to anisotropic models) is compatible with most cases of
cosmic evolution, but it has problems with black-hole models~\cite{Consistent}.
By its construction using geometrical areas of the region ${\cal V}$, the scheme relates the number of
vertices to the total volume of spatial regions.
Near the horizon in homogeneous coordinates of the Schwarzschild interior, the spatial volume shrinks,
making the number of lattice sites small.
However, the regime is supposed to be semiclassical for large black-hole mass, which is in conf\/lict with
a~small number of lattice sites, implying noticeable discreteness.
The analysis of dif\/ferent models~-- cosmological ones and those for black holes~-- shows that there
cannot be a~single universal power-law exponent for $\lambda_I(p^J)$ in all regimes.
Discrete quantum dynamics and ref\/inement behavior, just as the underlying state, depend on the regime
analyzed.
The role of coordinate choices hints at another important issue, namely how much the condition of
covariance and anomaly freedom restricts possible ref\/inement schemes.
This question remains largely unexplored owing to the complicated nature of the quantum constraint algebra,
but see~\cite{ScalarHol} for an interesting cosmological example that suggests restrictions, also pointing
at a~value near $x=-1/2$.
These phenomenological indications notwithstanding, a~demonstration that $x=-1/2$ or a~value near it is
more than an ad-hoc choice in cosmological models would require some kind of derivation from the full
theory.
For this feat, in turn, one would need to solve the problem of the semiclassical limit of unrestricted loop
quantum gravity, which remains one of the most pressing and most complicated problems of the f\/ield.
\subsection{Reduction}
Lattice ref\/inement in dif\/ference and ef\/fective equations refers to state parameters, most importantly
$\lambda_I$, depending on a~geometrical variable such as the total volume $V$.
One may view the appearance of $V$ as an internal time, on which the evolving state depends.
A possible procedure of implementing such a~dependence, as alluded to in Section~\ref{s:RefToy},
would be to write a~full state as a~superposition $\sum_V\psi_V$ of contributions $\psi_V$ belonging
to some f\/ixed volume eigenvalue $V$.
One would decompose a~dynamical state as an expansion in eigenstates of the internal-time ope\-ra\-tor, such as
the volume.
Although the procedure would be dif\/f\/icult owing to the complicated volume spectrum and the fact that
one would have to solve for a~dynamical state f\/irst, it is in line with standard treatments of internal
time.
The states appearing in the decomposition of~$\psi_V$ in the spin-network basis then show what discrete
structures are realized at a~given vo\-lu\-me~$V$, and the spacing as well as the number of vertices might
certainly change as one moves from one~$V$ to the next.
\looseness=1
After decomposing a~dynamical state as $\sum_V\psi_V$, one would still have to adapt it to
near-homogeneous geometries, that is implement the projection back to symmetric states.
Additional state-dependent parameters may arise, all to be modeled by suitable functions
$\lambda_I(p^J_j)$, the only parameters that survive with exact homogeneity.
Such functions would then be inserted in dynamical equations of reduced models, for instance in
dif\/ference equations of Abelian \mbox{models}.
Notice that one must know some features of the full evolution of a~state before def\/ining the reduced
Hamiltonian constraint, which in a~second step can be used to evolve a~reduced state.
Since the Hamiltonian constraint is one of the operators to be averaged for reduction, evolution and
reduction are not independent processes in the construction of models.
As a~consequence, reduced Hamiltonian constraints are state-dependent, even more so than the full
constraint operator with its state-dependent regularization of~\cite{QSDI}.
If for a~precise reduction we must know how to evolve a~full state, why do we not work with the full
evolved state rather than its reductions? The advantage of reduced models is that they of\/fer additional
approximation schemes, for instance in the derivation of observables or of ef\/fective equations.
However, reduced models can never provide exact predictions~-- if their predictions were exact, one would
not be dealing with a~reduced model.
It does not make much sense to derive physical quantities, for instance bounce densities, in exact terms
within minisuperspace models because the models themselves are not exact.
Only general ef\/fects, such as quantum hyperbolicity, the presence of bounces under certain conditions, or
qualitative low-curvature corrections may be meaningful predictions, but not specif\/ic values of some
parameters related to the discreteness scale.
\subsection{Spin-foam cosmology?}
Spin-foam cosmology~\cite{SFC,EffSFC, LQCStepping} attempts to enlist spin-foam techniques to address
quantum-cosmological questions by embedding a~simple structure with f\/initely many edges in a~spatial (or
space-time) manifold $\Sigma$.
Such a~map is clearly dif\/ferent from the (mini-)superspace embedding ${\cal M}\to{\cal S}$ used for
classical reductions, or a~map $\sigma\colon {\cal H}_{\rm hom}\to {\cal D}_{\rm inhom}$ of state spaces
used for quantum reductions.
Looking back at our discussion in Section~\ref{s:Reduction}, the question therefore arises what kind
of construction spin-foam cosmology can provide.
As usually emphasized in this context, spin-foam cosmology aims at a~description of quantum cosmological
space-times without making use of reductions, rather describing physics in a~full theory of quantum gravity
in which inhomogeneous modes are still present and subject to quantum f\/luctuations.
According to the classif\/ication in Section~\ref{s:Reduction}, such a~non-reduction scheme could
only be selection or projection, but there is certainly no control over the full non-symmetric solution
space, let alone the averaging problem, within spin-foam cosmology.
For this reason, spin-foam cosmology does not fall within our classif\/ication of reduction schemes.
If one wanted to change this conclusion, one would f\/irst have to clarify the precise relation between an
inhomogeneous amplitude and the proposed isotropic one in spin-foam cosmology.
So far, one just obtains a~new model from a~full one by inserting isotropic labels in the evaluation, which
is not enough for a~classif\/ication of the relationship between dif\/ferent models attempted in this
setting.
In fact, it is not clear in which sense~-- or if at all~-- spin-foam cosmology describes symmetric models.
It is true that inhomogeneous modes have not been truncated but remain present and may f\/luctuate,
potentially a~feature that would allow one to go beyond reduced models (see
also~\cite{SymmQFT,SymmStatesInt} in the canonical setting).
However, spin-foam cosmology at present lacks conditions that would ensure inhomogeneous modes to be
suf\/f\/iciently small for the models to be considered symmetric, not just in their f\/luctuations but even
in their expectation values.
The graphs used in spin-foam cosmology refer to f\/initely many degrees of freedom, often related
heuristically to the number of degrees of freedom of homogeneous minisuperspaces.
However, counting degrees of freedom is not enough to ensure that a~model is good.
One might simply def\/ine a~f\/inite-dimensional ``minisuperspace'' by picking some point $x_0$ in
space-time and considering only the metric components $g_{ab}(x_0)$ as degrees of freedom, a~model which
would be meaningless because of its dependence on the space-time gauge.
By using spatial embeddings of graphs instead of points $x_0$, spin-foam cosmology is in danger of
producing models close to the one just sketched.
(Indeed, the status of covariance remains unclear in spin-foam cosmology as well as full spin foams.)
What is missing in this context is a~well-def\/ined analog of the map $\sigma$ for states used in loop
quantum cosmology.
Only such an object could tell whether the correct degrees of freedom have been captured.
Another question, related to the topics of this article, is how spin foams Abelianize.
The f\/inal equations often produced in this context resemble dif\/ference equations of Abelian loop
quantum cosmology, even though the starting point has SU(2) degrees of freedom.
No clear Abelianization step has been provided.
Finally, working with f\/ixed graphs embedded in space, spin-foam cosmology has not given rise to
ref\/inement models.
\subsection{Evaluation of models of quantum cosmology}
So far, we have discussed the construction of reduced and other models.
Their limited nature regarding the dynamics requires care also, and especially, when they are evaluated for
physi\-cal predictions.
In addition, there are caveats which apply to any construction in quantum gravity, and so to model systems
as well.
To guarantee that models, obtained in a~suf\/f\/iciently parameterized way to ensure their genericness, can
indeed be evaluated reliably, one must use evaluation methods or approximations that do not bring in hidden
assumptions about the ge\-ne\-ral form of ef\/fects.
Important questions such as the problem of time, deparameterization\footnote{Deparameterization has become
something of a~method of choice in quantum cosmology, and models in which physical Hilbert spaces are
derived in this way are often called ``complete quantizations''. However, as a~quantization of a~space-time
theory, such constructions can be considered complete only when one has shown that results do not depend on
the choice of internal time.
No such demonstration has been given in the models proposed so far.
See also the discussion in~\cite{ReducedKasner}.}, and potential signature change enter here.
Moreover, in spite of the ubiquitous use of ``ef\/fective equations'', in most cases they are based on
a~misinterpretation of the classical {\em limit} presented in~\cite[Section~4.3]{Bohr},
based on~\cite{SemiClass}, as a~semiclassical approximation.
Accordingly, important quantum corrections have been missed in many analyses: While some $\hbar$-terms are
kept in $\ell_{\rm P}$-related holonomy corrections, quantum back-reaction terms of the same order are
dropped.
For details on these important issues, we refer to the review~\cite{Stellenbosch}.
\section{Conclusions}
Much work remains to be done to establish reduced models of loop quantum gravity as well-def\/ined and
controlled approximations, and as reliable sources of detailed predictions in high-curvature regimes.
As discussed in Section~\ref{s:Reduction}, an analogous problem must be faced even classically in
relating non-symmetric geometries to symmetric ones: the averaging problem.
A complete understanding of quantum minisuperspace models as approximations of the full theory, even if
a~reduction mechanism is included, can be obtained only when the classical averaging problem is better
understood.
Lacking a~general solution, no approach to quantum gravity is yet able to produce a~complete derivation of
reduced models.
Nevertheless, with suf\/f\/icient care one can make progress and render it at least likely that all crucial
ef\/fects of the full theory are captured.
That non-Abelian ef\/fects should play some role in loop quantum cosmology and require caution has been
emphasized quite some time ago regarding specif\/ic properties of inverse-triad
corrections~\cite{BoundFull} as well as general properties of homogeneous models~\cite{DegFull}, but it has
not often been realized.
The present article of\/fers several new observations and constructions to this end: We have pointed out
that most considerations made so far in loop quantum cosmology suf\/fer from Abelian artefacts, related to
the use of function spaces on the Bohr compactif\/ication of the real line, following~\cite{Bohr}.
To correct this oversight, a~new quantization of homogeneous connections is developed in
Section~\ref{s:Mini}, which starts from non-Abelian models and takes into account the complete
structure of invariant connections.
The resulting Hilbert space representation, when restricted to Abelian variables, is related to
Bohr-quantized models by an isometric $*$-algebra morphism, but one that is not unitary or bijective.
We lose information when we map states to the Bohr--Hilbert space, corresponding to the edge-spin degeneracy
inherent in previous models.
The edge-spin degeneracy of holonomies is removed by the new quantization in non-Abelian and Abelian
models, giving a~better handle on lattice structures and the relation to the full theory.
We have strictly related basic operators in the full theory and in models, and showed how quantum ef\/fects
in composite operators can be captured by local quantizations.
The averaging required can lead to unexpected features~-- as seen in detail for f\/lux operators~-- which
one would not endeavor to implement in a~pure minisuperspace quantization without being prompted by the
relationship with the full theory.
Several implications have been demonstrated, especially regarding lattice ref\/inement and the form and
sizes of quantum-geometry corrections, most importantly those due to inverse triads.
We emphasize that our constructions started with the realization of def\/iciencies in current quantizations
based solely on Abelian models; seeing lattice ref\/inement or local features was not the main aim but
nevertheless resulted as an unavoidable consequence.
Section~\ref{s:limit} has provided cautionary remarks, detailing the current incomplete status of
the f\/ield and providing some guidelines for evaluations and the approach to physicality.
\subsection*{Acknowledgements}
The author is grateful to the anonymous referees for several helpful comments and suggestions.
This work was supported in part by NSF grants PHY-0748336 and PHY-1307408.
\pdfbookmark[1]{References}{ref}
|
{
"timestamp": "2013-12-31T02:09:59",
"yymm": "1206",
"arxiv_id": "1206.6088",
"language": "en",
"url": "https://arxiv.org/abs/1206.6088"
}
|
\section{Introduction}
To explain the cosmic acceleration predicted from the Ia type supernova
observations (Perlmutter, S. et al. 1999; Riess, A. G. et al. 1998)
one popular way out is to modify the stress-energy tensor part, i.e., the right
hand side of Einstein's field equation. The existence of
some unknown matter, termed Dark Energy (DE hereafter), has been assumed (Riess,
A. G. et al 2004; Perlmutter, S. et al 1998;
Garnavich, P. M. et al 1998; Bachall, N. A. et. al. 1999; Copeland, E. J. et.
al. 2006) which violates the strong energy condition.
The simplest candidate for DE is a tiny
positive cosmological constant ($\Lambda$) which obeys the equation of state (EoS
hereafter) $w=-1$. But
because its energy scale is much lower than the normal scale expected for a constant
$\Lambda$, a dynamical $\Lambda$ was introduced (Caldwell, R. R. et. al.
1998).
Again, at the very early stage of the universe, the energy scale for a varying
$\Lambda$ is not sufficient. So to avoid this problem, known as the
cosmic coincidence problem (Steinhardt, P. J. et. al. 1999), a new field, called
tracker field
(Zlatev, I. et. al. 1999) was prescribed. In similar way there are many models
(Sahni, V. and Starobinsky, A. A. 2003) in Einstein gravity to best fit the
data. Yet they
require some modifications. From this point of view some
alternative models are evolved. Most of the DE models
involve one or more scalar fields with various actions and with or
without a scalar field potential (Maor, I. et. al. 1998). Now, as
the observational data permits us to have a rather time varying
EoS, there are a bunch of models characterized by
different scalar fields such as a slowly rolling scalar field
(Quintessence) ($-1<\omega<-1/3$, $\omega(=p/\rho)$, being the EoS parameter)
(Caldwell, R. R. et. al. 1998), k-essence
(Armendariz - Picon et. al. 2000), tachyon (Sen, A. 2002), phantom ($\omega<-1$)
(Caldwell, R. R. 2002), ghost condensate
(Arkani-Hamed, N. et. al. 2004; Piazza, F and Tsujikawa, S. 2004), quintom (
Feng, B. et. al. 2005), Chaplygin gas models
(Kamenshchik, A. Y. et. al. 2001) etc. Some recent reviews on DE models are
described in the ref. (Copeland, E.J. et. al. 2006; Li, M. et. al.2011).
While explaining evolution of the universe, various DE models have been
proposed, all of which must be constrained by astronomical
observations. In all the models, the EoS parameter $\omega$ plays a
key role and can reveal the nature of DE which
accelerates the universe. Different equations of state lead to
different dynamical changes and may influence the evolution of the
universe. The EoS parameter $\omega$ and its time
derivative with respect to Hubble time are currently constrained
by the distance measurements of the type Ia supernova and the
current observational data constrain the range of equation of
state as $-1.38<\omega<-0.82$ (Melchiorri, A. et. al. 2003). Recently, the
combination
of WMAP and Supernova Legacy Survey data shows a significant
constraint on the EoS
$\omega=-0.97^{+0.07}_{-0.09}$ for the DE, in a flat universe
(Seljak, U. et. al. 2006). Recently some parametrization for the variation of
EoS
parameters $\omega(z)$ have been proposed describing the DE component.
1. $\omega(z)=\omega_{0}+\omega_{1}z$ (Cooray, A. R. and Huterer, D. 1999). Here
$w_{0}=-1/3$ and
$w_{1}=-0.9$ with $z<1$. This grows increasingly unsuitable for $z>1$. So the
following model has been proposed.
We will call this parametrization as $`linear~ parametrization'$.
2. $\omega(z)=\omega_{0}+\omega_{1}\frac{z}{1+z}$. This ansatz was first
discussed by Chevallier and Polarski (Chevallier, M. and Polarski, D. 2001) and
later studied
more elaborately by Linder (Linder, E. V. 2003). The best fit values for
this model while fitting with the SNIa gold dataset are
$\omega_{0} = -1.58$ and $\omega_{1} = 3.29$. We will call this parametrization
as $`CPL(Chevallier-Polarski-Linder)~ parametrization'$.
3. $\omega(z)=\omega_{0}+\omega_{1}\frac{z}{(1+z)^{2}}$ ( Jassal, H. K. et. al.
2005). A fairly rapid evolution of this EoS allowed
so that $\omega(z)\ge -1/2$ at $z>0.5$ is consistent with the supernovae
observation.
We will call this parametrization as $`JBP(Jassal-Bagla-Padmanabhan)~
parametrization'$.
4. $ \omega(z)=-1+\frac{(1+z)}{3}
\frac{A_{1}+2A_{2}(1+z)}{A_{0}+2A_{1}(1+z)+A_{2}(1+z)^{2}}$
(Alam, U. et. al. 2004a, 2004b). This ansatz is exactly the cosmological
constant
$\omega = -1$ for $A_{1} = A_{2} = 0$ and DE models with $w =-2/3$ for
$A_{0} = A_{2} = 0$ and $\omega = -1/3$ for $A_{0} = A_{1} = 0$. It has
also been found to give excellent results for DE models in which
the equation of state varies with time including quintessence,
Chaplygin gas, etc. The best fit values of $A_{1}$ and $A_{2}$ are $A_{1}=-4.16$
and $A_{2}=1.67$ for the SN1a Gold dataset.
We will call this parametrization the `ASSS (Alam--Sahni--Saini--Starobinsky)
parametrization'.
5. $\omega(z)=\omega_{0}+\omega_{1}\ln(1+z)$ (Efstathiou, G. 1999). This
evolution form of EoS is valid for $z<4$.
$\omega_{1}$ is a small number which can be determined by the observations. The
minimum value of $\omega_{1}$ is
approximately $-0.14$ and $\omega_{0}\ge -1$. We will call it the
`Log parametrization'.\\
Thermodynamics of DE universe filled up with the fluids with linear and JBP
parametrization had been studied before
(Mazumder, N. et. al. 2010). Thermodynamics with CPL was studied by Xing, L.
et. al. (2011). In this paper we are
going to study DE universe filled with fluid Log parametrization and ASSS
parametrization respectively which have
not been studied before so extensively.
As we went through the literature we came to know that apart from the
cosmological constant ($\omega=-1$), the
hyperbolic sine scalar field potential (Sahni, V. and Starobinsky, A.A 2000;
Urena-Lopez, L.A. and Matos, T. 2000;
Sahni, V. et al. 2003) and the topological defect models, there is no DE model
with constant $\omega$ consistent with observation
(Alam, U. et. al. 2004a). Almost every model like Quintessence, Chaplygin gas
(Setare, M.R. and Bouhmadi-Lopez, M.R. 2007, Setare, M.R. 2007b)
etc. depicts significant evolution in $\omega(z)$ over sufficient time scale.
The way towards a meaningful reconstruction of $\omega(z)$ depends on inventing
an efficient fitting function
for either $d_{L}(z)$ or $H(z)$, where $d_{L}(z)$ is luminosity distance and
$H(z)$ is Hubble parameter.
The parameters of this fitting function are determined by matching to Supernova
observations. Now, one can
manage to reconstruct the functional form of $\omega(z)$ by taking an ansatz of
the luminosity distance or
$H(z)$ and then by comparing it to supernova observations.\\
This model is based on the following ansatz:
\begin{equation}
H(x)=H_{0}\left(\Omega_{m}x^{3}+A_{0}+A_{1}x+A_{2}x^{2}\right)^{\frac{1}{2}}.
\end{equation}
This can be equivalently written as follows:
\begin{equation}
\rho_{DE}=\rho_{0c}\left(A_{0}+A_{1}x+A_{2}x^{2}\right)
\end{equation}
where $x=\left(1+z\right)$, $\rho_{0c}$ is the critical density at present time
and $\Omega_{m}$ is the dimensionless density of matter.\\
If we carefully note, we can see that the ansatz in terms of energy density is
actually a truncated Taylor expansion, exploiting the fact that any well
behaved function can be well approximated by a Taylor expansion within a range. So
the model is valid for $z \leq $ few (Alam U et. al. 2004a).
The model yields excellent results among DE models in which the EoS varies with
time including quintessence, Chaplygin gas etc. (Sahni, V. et al. 2003; Alam, U.
et al. 2003). Alam, U et al. 2004a (In figure 1 of their paper) have shown the
accuracy of the ansatz when it is applied to several other DE models like
tracker quintessence, the Chaplygin gas and super-gravity (SUGRA) models. They
also plotted the deviation of $\log\left(d_{L}H_{0}\right)$ (which is the
measured quantity for SNe) obtained via the ansatz from the actual model values.
Clearly the ansatz comes out to be of excellent agreement well over a
significant red-shift range for $\Omega_{0m}=0.3$. The ansatz agrees with these
models of dark energy with less than $0.5\%$ errors in the redshift range where
we do have SNe data available (Alam, U. et. al. 2004a).
The analysis of type Ia supernova data involving the priors with most frequently
used condition as $\omega$ = constant and $\omega \geq -1$ leads to confinement
of DE to within a narrow class of models. Moreover, when we impose such priors
on the cosmic equation of state it can result in a complete misrepresentation
of reality as shown in (Maor, et al. 2002).
Recently, the evolution of the EoS parameter with redshift has been investigated
(Vazquez, J. A. et. al. 2012) by performing a Bayesian analysis of current cosmological
observations. Vazquez, J. A. et. al. have shown if they calculate the Bayes'
factor then most of the data catalogues supports the fact that CPL, JBP are
significantly disfavored with respect to simple $\Lambda CDM$ model
($\omega=-1$). Now, linear parametrization is not to be considered in such a
case as the parametrization blows up at high $z$. Calculation of the Bayes' factor
for ASSS and $\Lambda CDM$ using codes like CAMB etc. is a bit cumbersome. But
from intuition we can see $-1$ term is already there in ASSS parametrization.
Extra additional $z$ dependent term may make it different enough from the
$\Lambda CDM$ model. Even theoretically, ASSS is efficient enough to explain
different phases of universe. These observational supports substantiate our
interest of working with ASSS model.
Linear, CPL, JBP or ASSS -- all the parametrizations can be treated as kinds of
combinations of polynomials with $(1+z)$ as the polynomial parameter. These models
are efficient for early epochs whereas they are not that effective for low
redshifts. While parametrizing, the fact that at low redshifts the
magnitude-redshift relation is degenerate for the models having the same deceleration
parameter, should be taken into account. Besides, keeping consistency of SN
constraints on dimensionless DE density with those derived from CAMB
measurements is another important factor. These constraints along with many
others have given birth to the Log parametrization. This is somehow not following the
common pathway of considering DE EoS. These properties did motivate us to take
Log parametrization for the study of thermodynamics of universe.
We will study the universe from thermodynamical aspect for both type of fluids
separately in Sec. \ref{section2}. In Sec. \ref{section3} we will seek the
thermal EoS for both type of fluid separately along with study on
thermodynamical stability. The Internal Energy, thermodynamic pressure, entropy,
and temperature as functions of volume have been determined in this section. The
Sec. \ref{section4} contains a general derivation of thermal EoS for any fluid
obeying $p=\omega(z) \rho$ and a general discussion on stability criterion. We
have tried to give a physical interpretation of the generality of thermal EoS
and the expression for energy density as a function of temperature and volume in
this section. Study of unstable cases and the onset of instability have been discussed
in Sec. \ref{sectionsuppliment}. The Sec. \ref{section5} proves the validity of
laws of thermodynamics on the apparent horizon and invalidity of the same on the
event horizon which agrees with (Mazumder, N. et. al. 2010; Xing, L. et. al.
2011). The paper ends with a brief concluding remark in Sec. \ref{section6}.
\section{Study of Universe Treating it as a Thermodynamical
System}\label{section2}
Let us consider a Universe filled with a perfect fluid having volume $V$ and
$\rho,~p,~T$ and $S$ are
respectively the energy density, thermodynamical pressure,
temperature and entropy of the system. From the first law of
thermodynamics (Myung, Y. S. 2011) we have
\begin{equation}\label{1}
TdS=d(\rho V)+pdV=d[(\rho + p)V]-Vdp
\end{equation}
We have the following integrability condition (Myung, Y. S. 2011), i.e., $$
\frac{\partial^{2}S}{\partial T
\partial V} = \frac{\partial^{2}S}{\partial V \partial T}~$$ which yields
(Gong, Y. et. al. 2007)
\begin{equation}\label{2}
\frac{dp}{\rho + p}=\frac{dT}{T}.
\end{equation}
So combining equations (\ref{1}) and (\ref{2}) and integrating we obtain the
following:
\begin{equation}\label{3}
S=\frac{(\rho + p)V}{T},
\end{equation}
where we have dropped an additive constant devoid of physical significance.\\
We assume our universe to be homogeneous and isotropic FRW
space-time with following line element
\begin{equation}\label{4}
ds^{2}=-dt^{2}+a^{2}(t)\left[\frac{dr^{2}}{1-kr^{2}}+~{r}^{2}\left(d\theta^{2}
+\sin^{2}\theta d\phi^{2}\right) \right],
\end{equation}
where $k$, the curvature scalar
having values $0,~\pm 1$ for flat, closed and open universe respectively.
The Friedmann equations and the energy conservation equation are
\begin{equation}\label{5}
H^{2}=\frac{8\pi G \rho}3 - \frac{k}{a^{2}}~,
\end{equation}
\begin{equation}\label{6}
\dot{H}=-4\pi G (\rho + p) + \frac{k}{a^{2}}
\end{equation}
and
\begin{equation}\label{7}
\dot{ \rho }+3H(\rho + p) = 0~,
\end{equation}
where the Hubble parameter is given by $H=\frac{\dot{a}}a$.
\subsection{{\bf Thermodynamics of Fluid with
$\omega(z)=\omega_{0}+\omega_{1}\ln \left(1+z\right)$}}
Using this $\omega(z)$, integrating the energy conservation equation (\ref{7})
we would get the expression of energy density as a function of redshift as
follows :
\begin{equation}\label{8}
\rho=\rho_{0}(1+z)^{3\left\{1+\omega_{0}+\frac{\omega_{1}}{2}\ln(1+z)\right\}},
\end{equation}
where $\rho_{0}$ is integration constant denoting the present time ($z=0$)
density of our universe. \\
\begin{enumerate}
\item From the graph of energy density vs redshift, we observe that $\rho$
initially has a very high value. With expansion of the universe, $\omega$
decreases, hence $\rho$ falls to $\rho_{0}$ as $z$ becomes $0$ from $2$.
\item The graph of Pressure vs Redshift reveals that the pressure remains
positive for allowed values of $\omega_{0}$ and $\omega_{1}$ in the range $ z
\in (0,2)$. It initially starts with high value and then decreases down to
present value as $z$ goes from $2$ to $0$.
\end{enumerate}
The integration of the integrability condition (\ref{2}) gives expression for
temperature as
\begin{equation}\label{9}
T=T_{0}(1+\omega)(1+z)^{3\left\{\omega_{0}+\frac{\omega_{1}}{2}\ln(1+z)\right\}},
\end{equation}
where $T_{0}$ is the integration constant (note that $T_{0}(1+\omega_{0})$ is the
present time temperature).\\
\begin{itemize}
\item The graph between Temperature and Redshift reveals that Temperature is
also dropping as we traverse from past to present, i.e, as $z$ varies from $2$
to $0$.
\end{itemize}
Using the last two expressions and plugging in the expression for entropy in the
last section, we get the following expression for $S$ :
\begin{equation}\label{10}
S=\frac{\rho_{0}}{T_{0}}.
\end{equation}
The heat capacity and square of the sound velocity are given by:
\begin{equation}\label{11}
c_{V}(z)=V\frac{\partial{\rho}}{\partial{T}}=\frac{3S\left(1+\omega\right)}{
\omega_{1}+3\omega\left(1+\omega\right)}.
\end{equation}\\
Note that when $\omega < -1$ we have $c_{V} < 0$, i.e., the universe is in an unstable phase.
Hence the stability condition demands:
$\omega > -1 $.
The expression for the speed of sound is:
\begin{equation}\label{12}
\mathit{v}_{s}^{2}(z)=\frac{\partial{p}}{\partial{\rho}}=\omega+\frac{\omega_{1}
}{3(1+\omega)} .
\end{equation}
Now demanding $\mathit{v}_{s}^{2}<1$ we get the following condition:
\begin{equation}\label{13}
\left\{\omega_{0}+\omega_{1}\ln(1+z)\right\}^{2} \leq \frac{3-\omega_{1}}{3},
\end{equation}
along with the constraint $\omega_{1}\leq 3$. $z$ has a minimum value of $0$,
hence we can write:
\begin{equation}\label{14}
3\omega_{0}^{2}+\omega_{1} \leq 3.
\end{equation}
\begin{figure}
~~~~~~~~~~~~~~~Fig 1~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Fig
2~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Fig 3\\
\includegraphics[height=1.8in,
width=1.8in]{fig1.eps}~~~~~~~\includegraphics[height=1.8in,
width=1.8in]{fig2.eps}
~~~~~~~\includegraphics[height=1.8in, width=1.8in]{fig3.eps}
\end{figure}
\subsection{{\bf Thermodynamics of Fluid with
$\omega(z)=-1+\frac{1+z}{3}\frac{A_{1}+2A_{2}(1+z)}{A_{0}+2A_{1}(1+z)+A_{2}
(1+z)^{2}}$}}
Using this $\omega(z)$, integrating the energy conservation equation we would
get the following:
\begin{equation}\label{15}
\rho=\rho_{0}M\exp{\left\{-\frac{A_{1}}{\sqrt{A_{0}A_{2}-A_{1}^{2}}}\arctan{
\left(\frac{A_{0}+A_{2}(1+z)}{\sqrt{A_{0}A_{2}-A_{1}^{2}}}\right)}\right\}},
\end{equation}
where $\rho_{0}$ is integration constant which we can get putting $z=0$, and
$M=A_{0}+2A_{1}(1+z)+A_{2}(1+z)^{2}$.
On the other hand integrating the integrability condition, we get the expression
for temperature:
\begin{equation}\label{16}
T=T_{0}(1+\omega)(1+z)^{-3}M\exp{\left\{-\frac{A_{1}}{\sqrt{A_{0}A_{2}-A_{1}^{2}
}}\arctan{\left(\frac{A_{0}+A_{2}(1+z)}{\sqrt{A_{0}A_{2}-A_{1}^{2}}}\right)}
\right\}},
\end{equation}
where $T_{0}$ is the integration constant.
Using the last two expressions and plugging in the expression for entropy in the
last section, we get the following expression for $S$:
\begin{equation}\label{17}
S=\frac{\rho_{0}}{T_{0}}.
\end{equation}
The heat capacity and square of the sound velocity are given by:
\begin{equation}\label{18}
c_{V}(z)=V\frac{\partial{\rho}}{\partial{T}}=
S\left\{\frac{1}{1+\omega}+\frac{\frac{3}{(1+\omega)(1+z)}-\frac{1}{(1+\omega)^{
2}}\frac{\partial\omega}{\partial z}}{\frac{1}{1+\omega}\frac{\partial
\omega}{\partial{z}}+\frac{3\omega}{1+z}}\right\}.
\end{equation}
Upon simplification we get:
\begin{equation}\label{18.1}
c_{V}=\frac{3S\left(2+\omega\right)}{3\left(1+\omega\right)^{2}
+\left(1+z\right)\frac{\partial \omega}{\partial z}}~~~~and
\end{equation}\\
\begin{equation}\label{19}
\mathit{v}_{s}^{2}(z)=\frac{\partial{p}}{\partial{\rho}}\\=\omega+\frac{\partial
{w}}{\partial{z}}\left\{\frac{A_{1}+A_{2}(1+z)}{A_{0}+2A_{1}(1+z)+A_{2}(1+z)^{2}
}\right\}^{-1}
\end{equation}
\section{Derivation of Thermal EoS \& Study of Stability}\label{section3}
In this approach it is very convenient to consider the fluid obeying the adiabatic
EoS $p=\omega (z) \rho$ with constant particle number $N$ as a thermodynamical
system. Without any loss of generality we can assume the internal energy ($U$)
and the pressure ($p$) as functions of entropy ($S$) and volume ($V$). So we can
structure our density and pressure and also the concerned differential
equation as (Landau, L. D. and Lifschitz, E. M. 1984)
\begin{eqnarray}\label{20}
\left.\begin{array}{c}
\rho=\frac{U}{V},\\\\
p=-\left(\frac{\partial U}{\partial V}\right)_{S}~~~~and\\\\
\frac{dU}{dV}+\omega (z)\frac{U}{V}=0.
\end{array}\right\}
\end{eqnarray}
\subsection{{\bf Fluid with $\omega(z)=\omega_{0}+\omega_{1}\ln
\left(1+z\right)$}}
In (\ref{20}), use of $\omega(z)=\omega_{0}+\omega_{1}\ln(1+z)$, yields the
following:
\begin{equation}\label{22}
U=U_{0}V^{(\frac{\omega_{1}}{6}\ln V)}\exp{(-\omega_{0}V)},
\end{equation}
where $U_{0}$ is integration constant (It can be function of $S$ and
$\omega_{0}$ should be greater than $0$ for $U$ not to diverge). Hence the
energy density becomes
\begin{equation}\label{23}
\rho= U_{0}V^{(\frac{\omega_{1}}{6}\ln V-1)}\exp{(-\omega_{0}V)},
\end{equation}
while the expression for pressure is following:
\begin{equation}\label{24}
p=U_{0}\left\{\omega_{0}-\frac{\omega_{1}}{3}\ln V\right\}
V^{(\frac{\omega_{1}}{6}\ln V-1)}\exp{(-\omega_{0}V)}.
\end{equation}
The criterion for stability of the fluid during expansion:
\begin{enumerate}
\item $\left(\frac{\partial p}{\partial V}\right)_{S}< 0$.
\item The thermal capacity at constant volume should be greater than zero i.e
$c_{V}> 0$.
\end{enumerate}
The first condition leads to the following constraint:
\begin{equation}\label{24.1}
\left(\frac{\omega_{1}}{3}\ln V-1-\omega_{0}V\right)
\left(\omega_{0}-\frac{\omega_{1}}{3}\ln V\right)<\frac{\omega_{1}}{3}.
\end{equation}\\
Now to get to the thermal equation of state we start with the expression for
temperature:
\begin{equation}\label{25}
T=\left(\frac{\partial U}{\partial S}\right)_{V}.
\end{equation}
Using the expression for internal energy we obtain:
\begin{equation}\label{26}
T=\frac{U}{U_{0}}\frac{dU_{0}}{dS}=\frac{dU_{0}}{dS}V^{(\frac{\omega_{1}}{6}\ln V)}
\exp{(-\omega_{0}V)}.
\end{equation}
Now we will look into $U_{0}$ by considering the change of variable from (S,V)
to (P,T). The Jacobian of the transformation is (Landau, L. D. and Lifschitz, E.
M. 1984):
\begin{equation}\label{27}
J=\left(\frac{\partial p}{\partial S}\right)_{V}\left(\frac{\partial T}{\partial
V}\right)_{S}-\left(\frac{\partial p}{\partial V}\right)_{S}\left(\frac{\partial
T}{\partial S}\right)_{V}.
\end{equation}
Note, we can also write it out as the following:
\begin{equation}\label{27.1}
J=-\left(\frac{\partial T}{\partial S}\right)_{V} \left(\frac{\partial
p}{\partial S}\right)_{T}.
\end{equation}
Equating the last two expressions and using the expression for $U$ and $T$ we
get the following:
\begin{equation}\label{28}
\left(\omega_{0}-\frac{\omega_{1}}{3}\ln
V\right)\left(-\omega_{0}+\frac{\omega_{1}}{3V}\ln
V\right)\left\{U_{0}\frac{d^{2}U_{0}}{dS^{2}}-\left(\frac{dU_{0}}{dS}\right)^{2}
\right\}=0
\end{equation}
\begin{equation}\label{29}
\Rightarrow U_{0}\frac{d^{2}U_{0}}{dS^{2}}-\left(\frac{dU_{0}}{dS}\right)^{2}=0
\end{equation}
\begin{equation}\label{30}
\Rightarrow U_{0}=\exp{(\alpha S)},
\end{equation}
where $\alpha$ is integration constant upon integrating the differential
equation for $U_{0}$. Hence we find
\begin{equation}\label{31}
\rho=\frac{T}{\alpha V}~~~\&
\end{equation}
\begin{equation}\label{32}
p=\omega \rho =\frac{T}{\alpha
V}\left(\omega_{0}-\frac{1}{3}\omega_{1}\ln V\right)
\end{equation}\\
The existence of critical point requires the following condition to be true:
\begin{eqnarray}\label{32.1}
\left.\begin{array}{c}
\left(\frac{\partial p}{\partial V}\right)_{T}=0\\\\
\left(\frac{\partial^{2}p}{\partial V^{2}}\right)_{T}=0\\\\
\left(\frac{\partial^{3}p}{\partial V^{3}}\right)_{T} < 0
\end{array}\right\}
\end{eqnarray}\\
But in this model, the point where we have $\left(\frac{\partial p}{\partial
V}\right)_{T}=0$ has the property that the second of the above criteria does not hold,
as
\begin{equation}\label{32.2}
\left(\frac{\partial^{2}p}{\partial
V^{2}}\right)_{T}=\frac{T}{\alpha}\frac{\omega^{\prime\prime}(V)}{V}=\frac{
\omega_{1}}{3V^{2}}\frac{T}{\alpha V}\neq 0~~,
\end{equation}
where prime($\prime$) denotes derivative with respect to volume. Hence, in this
model universe does not go through any critical point, even if there is a
transition from unstable to stable configuration or vice-versa the transition is
smooth.\\
The specific heat at constant volume comes out to be :
\begin{equation}\label{33}
c_{V}=T\left(\frac{\partial S}{\partial T}\right)_{V}=\frac{1}{\alpha}~~.
\end{equation}
Choosing $\alpha >0$ guarantees the fulfillment of second criterion for
stability, i.e., $c_{V} >0$.
while the velocity of sound is given by:
\begin{equation}\label{34}
\mathit{v}_{s}^{2}=\omega - \frac{\omega_{1}}{\omega_{1}\ln V -3 -3\omega_{0}
V}~~.
\end{equation}
Here also $\mathit{v}_{s}^{2} \leq 1$ imposes constraint on the allowed values
of $\omega_{0}$ and $\omega_{1}$.
From the expression for $U_{0}$, the entropy is given by:
\begin{equation}\label{35}
S=\frac{1}{\alpha}\left[\ln\left(\frac{T}{\alpha}\right)+\omega_{0}V-\frac{
\omega_{1}}{6}(\ln V)^{2}\right]~~.
\end{equation}
\subsection{{\bf Fluid with
$\omega(z)=-1+\frac{1+z}{3}\frac{A_{1}+2A_{2}(1+z)}{A_{0}+2A_{1}(1+z)+A_{2}
(1+z)^{2}}$}}
The expression for $U$ in this case becomes:
\begin{equation}\label{36}
U=U_{0}\left(A_{0}V+2A_{1}V^{2/3}+A_{2}V^{1/3}\right)\exp
\left\{\frac{A_{1}~\arctan{\left(\frac{A_{0}V^{1/3}+A_{1}}{\sqrt{A_{0}A_{2}-A_{1
}^{2}}}\right)}}{\sqrt{A_{0}A_{2}-A_{1}^{2}}}\right\}
\end{equation}
where $U_{0}$ is integration constant. Therefore energy density comes out to be:
\begin{equation}\label{37}
\rho= U_{0}\left(A_{0}+2A_{1}V^{-1/3}+A_{2}V^{-2/3}\right)\exp
\left\{\frac{A_{1}~\arctan{\left(\frac{A_{0}V^{1/3}+A_{1}}{\sqrt{A_{0}A_{2}-A_{1
}^{2}}}\right)}}{\sqrt{A_{0}A_{2}-A_{1}^{2}}}\right\}
\end{equation}
while the pressure is given by
\begin{equation}\label{38}
p=\omega(V) U_{0}\left(A_{0}+2A_{1}V^{-1/3}+A_{2}V^{-2/3}\right)\exp
\left\{\frac{A_{1}~\arctan{\left(\frac{A_{0}V^{1/3}+A_{1}}{\sqrt{A_{0}A_{2}-A_{1
}^{2}}}\right)}}{\sqrt{A_{0}A_{2}-A_{1}^{2}}}\right\}
\end{equation}
where $\omega(z)=
\left(-1+\frac{1+z}{3}\frac{A_{1}+2A_{2}(1+z)}{A_{0}+2A_{1}(1+z)+A_{2}(1+z)^{2}}
\right)$ and $V=a^{3}=(1+z)^{-3}$.
Now we have two criteria for stability of the fluid during expansion:
\begin{enumerate}
\item $\left(\frac{\partial p}{\partial V}\right)_{S}< 0$
\item The thermal capacity at constant volume should be positive, i.e., $c_{V}>
0$
\end{enumerate}
The first condition yields:
\begin{equation}\label{38.1}
\left(A_{0}+2A_{1}V^{-\frac{1}{3}}+A_{2}V^{-\frac{2}{3}}\right)\left(\frac{
\partial \omega}{\partial
V}+\omega(V)\frac{A_{1}A_{0}V^{-\frac{2}{3}}}{3\left(A_{0}V^{\frac{1}{3}}+A_{1}
\right)^{2}+\left(A_{0}A_{2}-A_{1}^{2}\right)}\right) < \frac{2}{3}R
\end{equation}
where $R=\left(A_{1}V^{-\frac{4}{3}}+A_{2}V^{-\frac{5}{3}}\right)\omega(V)$.
Now we start with the expression for temperature to obtain thermal EoS:
\begin{equation}\label{39}
T=\left(\frac{\partial U}{\partial S}\right)_{V}
\end{equation}
Using equation(\ref{36}) we obtain:
\begin{equation}\label{40}
T=\frac{U}{U_{0}}\frac{dU_{0}}{dS}=\frac{dU_{0}}{dS}(A_{0}V+2A_{1}V^{2/3}+A_{2}
V^{1/3})\exp
\left\{\frac{A_{1}~\arctan{\left(\frac{A_{0}V^{1/3}+A_{1}}{\sqrt{A_{0}A_{2}-A_{1
}^{2}}}\right)}}{\sqrt{A_{0}A_{2}-A_{1}^{2}}}\right\}
\end{equation}
Equating (\ref{27}) and (\ref{27.1}) for this system we have,
$$
U_{0}\frac{d^{2}U_{0}}{dS^{2}}-\left(\frac{dU_{0}}{dS}\right)^{2}=0 \Rightarrow
U_{0}=\exp{(\alpha S)}$$
This result resembles with (\ref{30}). Even we can see the expression for energy
density will be similar to (\ref{31}). Expression for pressure in this case is :
\begin{equation}\label{45}
p=\omega \rho
=\left\{-1+\frac{1+z}{3}\frac{A_{1}+2A_{2}(1+z)}{A_{0}+2A_{1}(1+z)+A_{2}(1+z)^{2
}}\right\}\frac{T}{\alpha V}
\end{equation}\\
The same conditions (\ref{32.1}) apply here too, which yields the following
conditions needed to be satisfied for having a critical point:
\begin{equation}\label{45.1}
\omega^{\prime\prime}(V)=0\ \&\ \omega^{\prime\prime\prime}(V) < 0
\end{equation}
The thermal capacity of the system at constant volume becomes :
\begin{equation}\label{46}
c_{V}=T\left(\frac{\partial S}{\partial T}\right)_{V}=\frac{1}{\alpha}
\end{equation}
If we choose $\alpha >0$ it automatically guarantees the fulfillment of second
criterion for stability in the same way as previous section i.e $c_{V} >0$.
While the velocity of sound is given by:
\begin{equation}\label{47}
\mathit{v}_{s}^{2}=\omega +\frac{\frac{\partial \omega}{\partial
V}}{\frac{\partial (\ln
p)}{\partial V}}
\end{equation}
Now $\mathit{v}_{s}^{2} <1 $ imposes constraint on $\omega_{0}$ and
$\omega_{1}$.
From the expression for $U_{0}$, we obtain the expression for entropy which is:
\begin{equation}\label{48}
S=\frac{1}{\alpha}\left[\ln\left(\frac{T}{\alpha}\right)-\ln\left(A_{0}V+2A_{1}V^
{2/3}+A_{2}V^{1/3}\right)-\left\{\frac{A_{1}~\arctan{\left(\frac{A_{0}V^{1/3}+A_
{1}}{\sqrt{A_{0}A_{2}-A_{1}^{2}}}\right)}}{\sqrt{A_{0}A_{2}-A_{1}^{2}}}\right\}
\right]
\end{equation}
\section{A General Derivation : Independent of Model}\label{section4}
Looking deep into the fact that in last section we have observed $U_{0}$ comes
out to be $\exp{(\alpha S)}$ independent of the form of $\omega(z)$, here we are
giving a proof without assuming any specific form of $\omega(z)$ which makes the
result stronger.
We start with integrating energy conservation equation (\ref{20})
\begin{equation}\label{49}
U=U_{0}F(V)
\end{equation}
where $F(V)= \exp\left\{- {\Large \int}\frac{\omega(V)}{V} dV\right\}$.
Hence $P$ becomes:
\begin{equation}\label{50}
P=U_{0}\frac{F(V)\omega(V)}{V}
\end{equation}
Now to get to the thermal equation of state we start with the expression for
temperature (\ref{25}) and using the expression for internal energy we obtain:
\begin{equation}\label{52}
T=\frac{U}{U_{0}}\frac{dU_{0}}{dS}=\frac{dU_{0}}{dS}F(V)
\end{equation}
Equating (\ref{27}) and (\ref{27.1}) and using the expression for $U$ and $T$ we
get the following:
\begin{equation}\label{54}
\frac{F(V)F'(V)\omega
(V)}{V}\left[U_{0}\frac{d^{2}U_{0}}{dS^{2}}-\left(\frac{dU_{0}}{dS}\right)^{2}
\right]=0
\end{equation}
$$
\Rightarrow U_{0}\frac{d^{2}U_{0}}{dS^{2}}-\left(\frac{dU_{0}}{dS}\right)^{2}=0
\Rightarrow U_{0}=\exp{(\alpha S)}
$$
where $\alpha$ is integration constant upon integrating the differential
equation for $U_{0}$. Immediately, the expression for the energy density becomes
$\rho=\frac{T}{\alpha V}$ while the pressure is given by
\begin{equation}\label{57}
p=\frac{\omega T}{\alpha V}
\end{equation}
The specific heat at constant volume comes out to be $
c_{V}=T\left(\frac{\partial S}{\partial T}\right)_{V}=\frac{1}{\alpha}
$.\\
and the entropy is given by following expression:
\begin{equation}\label{57.1}
S=\frac{1}{\alpha}\left[\ln\left(\frac{T}{\alpha}\right)-\ln F(V)\right]
\end{equation}\\
So, $T$ should be $\geq \alpha F(V)$ to ascertain $S\geq 0$.
The third law of thermodynamics demands the entropy to go to 0 as temperature
approaches zero. This demand translates into following criterion when we use
(\ref{57.1}):
\begin{equation}\label{57.11}
\lim_{T\rightarrow 0} \frac{F(V)}{T} =\frac{1}{\alpha}=c_{V}
\end{equation}\\
So we can conclude $F(V)$ must tend to 0 as universe cools down to absolute
zero. Using this information, from equation (\ref{57.11}) we can arrive at the
fact that
$F(V)$ is actually a measure of change of internal energy to effect a small
change in temperature around $T=0$ point. \\\\
{\bf Physically Interpreting The General Form of $U_{0}$ : }\\\\
Irrespective of the model we have obtained,
\begin{equation}\label{57.2}
U_{0}=\exp \left(\alpha S\right)
\end{equation}
In its differential form we can write
\begin{equation}\label{57.3}
\frac{dU_{0}}{U_{0}}=\alpha dS
\end{equation}
Now note we have $S=k_{B}\ln\Omega$ where $\Omega$ is the number of micro states
corresponding to a macrostate of the system. In its differential form it looks
very much like (\ref{57.3})
\begin{equation}\label{57.4}
\frac{d\Omega}{\Omega}=\frac{1}{k_{B}}dS
\end{equation}
So comparing equation (\ref{57.3}) and (\ref{57.4}), we interpret $U_{0}$ as a
measure of number of micro states. Now $U_{0}$ is related to energy of universe
at present and it is physically very plausible to have energy going in
proportional to the number of micro states.
\section{Instability : Time scale for Onset of
Instabilities}\label{sectionsuppliment}
It was Einstein who, famously, made ``the blunder'' while
producing a static universe which was unstable and not observationally supported!
Later, instability regarding cosmological matter creation was studied
(Saslaw, W. C. 1967). The general idea of thermodynamics says that a negative specific
heat indicates an instability (thermal, of course).\\\\
{\bf Logarithmic Model}\\
Here the expression of $c_{V}$ illustrates two cases of instability:\\
$(1)$ $\omega < -1$ along with $\omega_{1}+3\omega(1+\omega) > 0$ : Now it can
be easily noted that the former criterion implies
the latter. Hence we have instability for $\omega < -1$. Taking
$\omega_{0}=-0.995$ and $\omega_{1}=0.25$ (Efstathiou, G. 1999)
we would get that $z=-0.019$ marks the onset of instability. As $z$ goes in
the opposite direction of time, negative $z$ indicates towards the future. So
$z=-0.019$ can be treated as the timescale for onset of instability. This may
suggest that our universe is now in a thermally stable equilibrium but is
heading towards an instability where small perturbation can lead to catastrophic
change.\\\\
$(2)$ $-1<\omega$ along with $\omega_{1}+3\omega(1+\omega) < 0$ : Now $\omega>0$
does not satisfy this condition, hence we have stability when $\omega >0 $. So
the range is narrowed down to $-1<\omega<0$ which in turn implies an interesting
scenario that universe starts off with a stable state and it retains stability
until $\omega$ hits $0$. Thereafter it will undergo a stage of thermal
instability after which again it will be in a thermally stable state when
$\omega$ drops below $-1$.\\
In short, depending on the signature of $\omega_{1}+3\omega(1+\omega)$ the
instability may occur at $\omega=-1$ \footnote{this is the point we do
theoretically mark as the phantom barrier, beyond which universe may turn
towards different singularities like Big Rip(Caldwell et. al 2003; Nojiri et.
al. 2005) etc.} or at $\omega=0$ for which the corresponding $z$ is $-0.019$ \&
$52.517$ respectively. However, there are many objects which are at a redshift
of $z=52.517$. But we are yet to speculate any kind of impacts of thermal phase
transition of universe upon them. So based on current knowledge of explaining
observational data we can rule out such a transition at $z=52.517$. Besides, the
model is valid for $z\leq 4$ (Efstathiou, G. 1999). So we can not comment on the
second possibility staying within the model.
Note there is an observational constraint on $\omega_{0}$ as follows: $\omega_{0} >
-1$. So we cannot have $\omega < -1$ unless we make $z$ slightly negative, i.e., we are
heading towards instability. Also note $\omega_{1}$ is assumed to be positive to
reconcile with the fact that $\omega$ should decrease with time.
The expression for speed of sound (\ref{12}) within this model also shows
instability since with $\omega$ going below $-1$, the square of the speed
becomes less than $0$. {\it This is generally termed as adiabatic instability.}
(Bean, R et. al. 2008a, 2008b). Bean, R. et al. (2008a, 2008b) concludes the
models with nontrivial effective coupling between dark matter and
dark energy can lead to exponential growth of small adiabatic
instability which is also characterized by negative sound speed
squared. As a result even if universe starts with a uniform fluid the
instability will bring upon an exponential growth of small density
perturbation. In analogy we also can speculate that it might be possible that at
our indicated points small density perturbation will occur opposing fundamental
phenomenon like the propagation of sound to take place.\\\\
{\bf ASSS Model}\\
Within ASSS model, the expression for $c_{V}$ depicts that the onset of
instability happens when $\omega$ goes below $-2$, which in turn implies the
following timescale of onset of instability:
Taking allowed values of parameter (Alam, U. et. al. 2004a, 2004b) i.e taking
$A_{0}=0, A_{1}=-1.169, A_{2}=1.67$ we get that the instability sets
in at $z=-0.02$. The explanation of this scenario will follow the previous
discussion.
\section{On the Validity of Thermodynamical Laws}\label{section5}
Study of thermodynamical laws with the universe as a thermodynamical system has
been done in many works in the literature (Setare, M.R. 2006, 2007a; Setare, M.R., Shafei, S.
2006; Setare, M.R., Vagenas, E. C. 2008; Mazumder, N., Chakraborty, S. 2009,
2010; Bhattacharya, S. Debnath, U. 2011). We will just recall the result
relevant for our models.
\subsection{Validity on Apparent Horizon}
The Friedmann metric of an isotropic spatially homogeneous universe is given by:
\begin{equation}\label{59}
ds^{2}=-dt^{2}+\frac{a^{2}}{1-kr^{2}}dr^{2}+r^{\prime 2}
\left(d\theta^{2}+\sin^{2}{\theta}d\phi^{2}\right)
\end{equation}
where $r^{\prime}=ar$.
From this metric we can easily calculate the radius of the apparent horizon
($r^{\prime}_{AH}$) which comes out to be :
\begin{equation}\label{60}
r^{\prime}_{AH}=ar_{AH}=\frac{1}{\sqrt{H^{2}+\frac{k}{a^{2}}}}
\end{equation}
Now, in spatially flat universe, $k=0$ and thereby we have
$r^{\prime}_{AH}=\frac{1}{H}$.
Following Hawking's idea, the temperature associated with the apparent horizon
is:
\begin{equation}\label{61}
T=\frac{\kappa}{2\pi}
\end{equation}
where $\kappa$ is surface gravity of the apparent horizon and given by;
$\kappa=\frac{1}{r^{\prime}_{AH}}$.
Hence, the temperature associated with apparent horizon is $\frac{1}{2\pi
r^{\prime}_{AH}}$.
With k=0, using equation (\ref{5}) we obtain
\begin{equation}\label{62}
H^{2}=\frac{8\pi}{3}\rho
\end{equation}
To determine the validity of first law thermodynamics we first calculate the
energy crossing over this apparent horizon in an infinitesimal dt time which
comes out to be (Bousso, R. 2006)
\begin{equation}\label{63}
-dE_{AH}=\frac{4\pi (P+\rho)}{H^{2}}dt=-\frac{3}{2}\frac{(1+\omega)}{H(1+z)}dz
\end{equation}
On the apparent horizon,the entropy is:
\begin{equation}\label{64}
S_{AH}=\frac{A}{4}=\pi r^{\prime 2}_{AH}
\end{equation}
Hence the following holds :
\begin{equation}\label{65}
T_{AH}dS_{AH}=dr^{\prime}_{AH}
\end{equation}
\begin{equation}\label{66}
dr^{\prime}_{AH}=-\frac{1}{H^{2}}dH=-\frac{3}{2}\frac{(1+\omega)}{H(1+z)}dz
\end{equation}
\begin{equation}\label{67}
\Rightarrow -dE_{AH}=T_{AH}dS_{AH}
\end{equation}
Hence the first law of thermodynamics holds on the apparent horizon independent
of how we model $\omega(z)$.
Now, using the Gibbs equation we get (Izquierdo, G. and Pavon, D. 2006)
\begin{equation}\label{68}
TdS_{I}=dE_{I}+PdV
\end{equation}
where $S_{I}$ and $E_{I}$ are, respectively, the entropy and energy inside the
apparent horizon, and $E_{I}$ is given by
\begin{equation}\label{69}
E_{I}=\frac{4}{3}\pi r^{\prime 3}_{AH}\rho =\frac{1}{2}r^{\prime}_{AH}
\end{equation}
and the volume bounded by the apparent horizon is
\begin{equation}\label{70}
V=\frac{4}{3}\pi r^{\prime 3}_{AH}
\end{equation}
Therefore using equation (\ref{68}),(\ref{69}) and (\ref{70}), we get
\begin{equation}\label{71}
dS_{I}=\pi r^{\prime}_{AH}(1+3\omega)dr^{\prime}_{AH}
\end{equation}
Rewriting the last equation and using the expression for $dS_{AH}$, we get
\begin{eqnarray}\label{72}
\frac{d\left(S_{I}+S_{AH}\right)}{dz}=-\frac{9\pi}{2H^{2}}\frac{
\left(1+\omega\right)^{2}}{1+z}\\
\Rightarrow
\frac{d\left(S_{I}+S_{AH}\right)}{da}=\frac{9\pi}{2H^{2}}\left(1+\omega\right)^{
2}\left(1+z\right) > 0
\end{eqnarray}
Hence the Generalized Second Law of Thermodynamics holds if we consider the
apparent horizon and the volume bounded by it. So the result holds not only for
the two types of fluid we have discussed in the paper, but also for every fluid
obeying $p=\omega(z)\rho$, which is in agreement with (Mazumder, N. et al. 2010).
\subsection{Invalidity of Thermodynamic laws on Event Horizon}
The event horizon $r_{EH}$ is defined to be
\begin{equation}\label{73}
r_{EH}=\int_{t}^{\infty}\frac{dt}{a}=\frac{1}{1+z}\int_{z}^{-1} \frac{-1}{H} dz
\end{equation}
Hence the event horizon has a Hawking temperature of $\frac{1}{2\pi r_{EH}}$
which yields
\begin{equation}\label{74}
T_{EH}dS_{EH}=dr_{EH}=\frac{-1}{\left(1+z\right)H}dz
\end{equation}
It follows that:
\begin{equation}\label{75}
dE_{EH}+T_{EH}dS_{EH}=\frac{dz}{\left(1+z\right)H}\left(\frac{3}{2}
\left(1+\omega\right)-1\right) \neq 0 \quad \text{unless } \omega=-\frac{1}{3}
\end{equation}
Note that $\omega=-\frac{1}{3}$ is the marginal point of the Strong Energy
Condition, which is $ 3p + \rho \geq 0$.\\
Employing a similar technique we would get (Xing, L. et al. 2011)
\begin{equation}\label{78}
\frac{d\left(S_{I}+S_{EH}\right)}{dz}=2\pi
r_{EH}^{4}H\frac{dH}{dz}+\pi\left[3r_{EH}^{3}H^{2}\left(1+\omega\right)+2r_{EH}
\right]\frac{-1}{\left(1+z\right)H}
\end{equation}
Changing the variable to $a$ we get;
\begin{equation}\label{79}
\frac{d\left(S_{I}+S_{EH}\right)}{da}=2\pi
\left(r_{EH}^{4}H\right)\left(\frac{dH}{da}\right)+\pi\left[3r_{EH}^{3}H^{2}
\left(1+\omega\right)+2r_{EH}\right]\frac{1}{aH}
\end{equation}\\
Evidently $\frac{d\left(S_{I}+S_{EH}\right)}{da}$ is not necessarily positive
for any allowed value of $z$ (as we have $\frac{dH}{da}$ in the expression),
which, in turn, implies that the second law of thermodynamics breaks down inside
the event horizon, in agreement with (Xing, L. et al. 2011).
\section{Brief Summary}\label{section6}
So far we have studied the thermodynamics of the universe in two different models.
It has been revealed that the first model where
$\omega(z)=\omega_{0}+\omega_{1}\ln(1+z)$ does not go through any critical point.
In this model the Universe started with a high (fig. 1, 2, 3) energy density,
pressure and temperature, which fall as $z$ becomes $2$ from $0$. We imposed
restriction on allowed values of $\omega_{0}$ and $\omega_{1}$ by demanding
$\mathit{v}_{s} \leq 1$. The thermal capacity at constant volume changes sign as
$\omega$ becomes smaller than $-1$, but this transition is smooth without having
critical point. Unlike MCG or GCG here pressure depends both on temperature and
volume in thermal EoS. From equation (\ref{20}) we have shown it is possible to
have $c_{V} > 0$ irrespective of model to ascertain a stable universe.
Thereafter we have generalized the results for a general EoS parameter. We have
shown that energy density takes the same form as a function of temperature and
volume irrespective of model with a general limiting behavior of thermodynamic
parameter (\ref{57.11}) to satisfy third law of thermodynamics. We have also
interpreted $U_{0}$ as a measure of the number of microstates and argued for its
natural plausibility (\ref{57.3}) and (\ref{57.4}). Moreover, it is intriguing
to note that the thermal EoS (\ref{57}) looks like ideal gas EoS in some way.
Also we have determined the asymptotic behavior of $F(V)$ such that third law
of thermodynamics holds. It has been shown that the first and second law of
thermodynamics hold on apparent horizon not only for our model but also for any
model having $p=\omega(z)\rho$. They fail to be valid on event horizon unless
$\omega$ takes a specific value, which is in perfect agreement with (Mazumder, N.
et al.; Xing, L. et al. 2011).
One important aspect which has been followed in this paper is an attempt to
speculate about the instabilities. Considering the thermodynamical point at which the
heat capacity changes its sign, as a transition point we have followed that the
point is nothing but the phantom barrier in the case of Log parametrization.
Even there we can find the squared sound speed turns out to be negative. Even
for ASSS such point arises for negative $z$, theoretically indicating to some
future point. We have also explored the possibility of the fact that at our
indicated points small density perturbation will occur such that the fundamental
phenomenon like the propagation of sound does not take place at all.\\\\
{\bf Acknowledgement : }\\\\
SP would like to acknowledge a debt of gratitude to Indian Academy of Science
for financial support and warm hospitality and IISc, Bangalore for providing
research facilities as the work was done during a time of summer visit. RB thanks
ISRO grant ``ISRO/RES/2/367/10-11" for providing Research Associate Fellowship.
Authors are thankful to Prof Banibrata Mukhopadhyay for fruitful discussions.
\frenchspacing
|
{
"timestamp": "2012-06-28T02:04:48",
"yymm": "1206",
"arxiv_id": "1206.6333",
"language": "en",
"url": "https://arxiv.org/abs/1206.6333"
}
|
\section{Introduction}
Suppose that $A =\bigoplus ^c _{i=0}A_i$ is an Artinian graded algebra over a field $K$
and $z$ is a homogeneous element of $A$.
We denote by $\times z$ the linear map $A \ra A$ defined by $\times z(a)=az$.
We say that $A$ has the weak Lefschetz property if there is a linear form
$g \in A$ such that the rank of $\times g$ is the maximum that can be expected from the
Hilbert function of $A$ alone. In the same sense the strong Lefschetz property means that the rank of
$\times g^k$ is maximum possible for every $k \geq 0$.
(For details see Definition~\ref{def_cosperner}). We use the definition of
``strong Lefschetz property'' in a restricted sense.
For the reason see \cite[Remark~4]{tHjW03}.
Consider the exact sequence
$$0 \ra A/(0:z) \ra A \ra A/(z) \ra 0$$
where the first map is
induced by the multiplication map $\times z$.
It seems to be interesting to ask under what conditions the strong/weak
Lefschetz property of $A$ can be deduced from that of $A/(0:z)$ and $A/(z)$.
Let $g$ be a general linear form (one independent of $z$). For the definition of
``general linear form,'' see Definition~\ref{def_cosperner}.
To determine the rank of the linear map $\times g : A \ra A$ from the knowledge of
the two homomorphisms
$\times g : A/(z) \ra A/(z)$ and $\times g: A/(0:z) \ra A/(0:z)$, one has to consider how a preimage
of $A/(z)$ in $A$ is mapped to $(z)$ by the map $\times z$.
Thus we are led to consider the exact sequence
$0 \ra (0:z) \ra A \ra (z) \ra 0$ as well.
Now to determine the rank of the linear map
$$\times z : A/(0:z) \ra A/(0:z)$$ we consider
the exact sequences
$$0 \ra A/((0:z):z) \ra A/(0:z) \ra A/(0:z)+(z) \ra 0$$
and
$$0 \ra (0:z):z \ra A/(0:z) \ra (z)+(0:z)/(0:z) \ra 0.$$
These sequences may be used repeatedly, so as to complete the calculation of the rank of $\times z$.
In our paper \cite{tHjW06}, we consider a family ${\mathcal F}$
of $K$-algebras
such that if $A \in \cF$ then there is an exact sequence
$0 \ra A' \stackrel{\times z}{\ra} A \ra B \ra 0$, with some linear form $z$,
where
$A' \in \cF$ and $B \in \cF$, and the rank of $\times g : A \ra A$ for a
general linear form can be determined from the knowledge of $A'$ and $B$.
The purpose of this paper is to provide basic tools and framework for considering
such a family of algebras, and to prove the strong/weak Lefschetz property for all
members of the family altogether.
One of the basic tools is the commutator algebra of a nilpotent matrix.
A famous lemma due to I. Schur states that the commutator
algebra of an irreducible matric set
is a division ring.
In some sense we are interested in the other extremal case,
namely the commutator algebra of a single nilpotent matrix.
In our previous paper~\cite{tHjW03} the commutator algebra of some commuting family of
matrices in a full matrix algebra played an important role in studying commutative Artinian algebras.
In this paper we treat the idea more generally.
For a nilpotent matrix $J$ we denote by $\C(J)$ the commutator algebra of $J$.
A description of $\C(J)$ can be found in \cite{fG59}, \cite{hTaA32}, and \cite{rB03}.
The algebra $\C(J)$ as a set of matrices is easily determined as it is the set of solutions of a system of linear equations.
The way it is put as a set of matrices, however, is not quite adequate for our purposes.
We rearrange the order of the basis elements, so that the matrices of the commutator algebra
are ``upper triangular'' as much as possible. The resulting set of matrices is denoted by
$\C(\whJ)$. The use of the transformation
$J \mapsto \whJ$ was suggested by \cite{hW46} and it worked quite effectively in \cite{tHjW03}.
We show the relation between
$\C(\whJ)$ and $\C(\whJ ')$, where $\whJ'$ is the submatrix of $\whJ$
corresponding to the restricted map
$J: \im\; J \ra \im\; J$. This is important in the
inductive argument and it reveals the
structure of the algebra $\C(J)$.
Once this is done, we may determine the Jacobson radical of the commutator algebra;
we also determine in several ways the simple modules of $\C(J)$
(Proposition~\ref{central_module}).
In our papers \cite{tHjW06} and \cite{tHjW07} we study further these central simple
modules for Gorenstein algebras $A$ with a linear form fixed.
Furthermore we show how the rank of
$\whM + \whJ$ can be computed (Proposition~\ref{rank_of_big_matrix}) for a certain
element $\whM \in \C(\whJ)$.
In our application to commutative Artinian $K$-algebras
we are going to choose two linear elements, $l, z$ of $A$, and show that, with certain conditions
imposed on $A$ and $z \in A$,
the rank of $\times (l + \lambda z)$, for most $\lambda \in K$,
reaches the CoSperner number of $A$, so the pair $(A, l + \lambda z)$ is weak Lefschetz.
The main results of this paper are Proposition~\ref{rank_of_deformation_2}
and Theorem~\ref{rank_of_general_element} of Section~4.
To explain the meaning of Theorem~\ref{rank_of_general_element},
let $y, z \in A$ be linearly independent linear forms of
an Artinian algebra $A$. The theorem gives a lower bound for the rank
$$\times (y + \lambda z) \in \End(A)$$
in terms of $\dim A/(z)$ and the ranks of the diagonal blocks of the linear maps
$$\times y ^i : A/(z) \ra A/(z) \in \End(A/(z)), \ i=1,2,\cdots. $$
Since the obvious upper bound for the rank $\times (y + \lambda z)$ is the
CoSperner number of $A$, these considerations give us a sufficient condition for $A$ to have the
weak Lefschetz property (Theorem~\ref{rank_of_general_element} $(ii)$).
This is a direct consequence of Propositions~\ref{rank_of_big_matrix}
and \ref{rank_of_deformation_2}, since $\times y \in \C(\times z)$.
To prove Proposition~\ref{rank_of_deformation_2},
we consider the form ring
$$\Gr_{(z)}(A)=A/(z)\oplus (z)/(z^2) \oplus (z^2)/(z^3) \oplus \cdots \oplus (z^{p-1})/(z^p)$$
for a $K$-algebra $A$ with respect to the principal ideal $(z)$.
It is well known that this is endowed with algebra structure;
in fact it is isomorphic to an algebra $B[y]/I$ where $B=A/(z)$ and
$I$ is generated by ``monomials in $y$,'' which are elements of the form $\overline{a_i}y^{m_i}$.
It turns out that the inequality of the proposition is essentially equivalent to
$$\rank (\times g_1) \leq \rank (\times g_2)$$
where
$g_1$ and $g_2$ are general linear forms of $\Gr_{(z)}(A)$ and $A$, respectively.
This simplifies the computation of the rank of
a general linear form as it is often the case that even
the rank of $\times g_1$ is sufficiently large.
Throughout this paper $K$ denotes a field of any characteristic unless otherwise specified.
When we discuss the strong Lefschetz property we assume the characteristic of $K$ is zero.
Except for this all results are valid for any characteristic.
In Section~2.2 and an early part of Section~2.3 we do use the exponential of a nilpotent matrix.
However, this is {\em not} essential. We use the exponential, because it facilitates
the description of the matrices of the commutator algebra $\C(J)$. This is easy to see, so
there will be no confusion. In \cite{tHjW07}, we consider Artinian algebras with non-standard grading, but
in this paper all algebras are assumed to have the standard grading so the generators of algebras have degree one.
The authors would like to express their thanks to the referee for invaluable suggestions for this paper.
\section{The commutator algebra of a nilpotent matrix }
\subsection{The Jordan canonical form and Young diagrams}
Let $K$ be a field.
Let $\bM(n)$ denote the set of $n \times n$ matrices with entries in $K$.
We are concerned with nilpotent matrices.
Since all eigenvalues of a nilpotent matrix are 0, its Jordan canonical form is
expressed by telling how it decomposes into Jordan cells.
Thus we are led to use a Young diagram to indicate the Jordan canonical form of
a nilpotent matrix. When we say that $(n_1, n_2, \cdots, n_r)$
is a partition of an integer $n$, it means that all $n_i$ are positive integers such that
$n=n_1+n_2+ \cdots + n_r$. When the terms in a partition of $n$
are arranged in decreasing order we may associate to it a Young diagram of
size $n$ in the well known manner.
The notation $T=T(n_1, n_2, \cdots, n_r)$ denotes the Young diagram
with rows of $n_i$ boxes, $i=1,2,\cdots, r$, where it is tacitly assumed that
$n_1 \geq n_2 \geq \cdots \geq n_r >0$.
The same Young diagram is also denoted by
$T=\whT(\nu _1, \cdots, \nu _p)$, by which it is meant that
the integer $\nu _i$ is the number of boxes in the $i$th column of $T$.
Let $T$ be a Young diagram of size $n$.
Suppose we number the boxes of $T$ with numbers $1,2, \ldots, n$. Then we may define a matrix
$M=(a_{ij}) \in \bM(n)$ as follows:
\begin{center}
\begin{equation} \label{jordan_matrix}
a_{ij}=\left\{ \begin{array}{l} \label{def_of_nilpotent_mat}
1 \ \ \mbox{if $j$ is next to and on the right of $i$,}\\
0 \ \ \mbox{otherwise.}
\end{array}
\right.
\end{equation}
\end{center}
It is easy to see that the matrix $M$ is nilpotent and the matrices given by
different numberings of the same $T$ are different only by a permutation of
rows and columns.
As is well known there is a bijection between the set of Young diagrams of size $n$ and
the conjugacy classes of matrices with a single eigenvalue in $\bM(n)$.
In this note we use only two numberings of a Young diagram.
One is the horizontal numbering starting with the first row and ending with the last row,
and the other vertical.
Here are examples of such numberings for the Young diagram $T=T(5,3,1)$.
\newcommand{\ssvline}[1]{\multicolumn{1}{|c}{#1}}
\newcommand{\svline}[1]{\multicolumn{1}{|c|}{#1}}
\begin{center}
\begin{equation}
\begin{array}{|ccccc} \hline
1 & \svline{2} & \svline{3} & \svline{4} & \svline{5} \\ \hline
6 & \svline{7} & \svline{8} & & \\ \cline{1-3}
9 & \ssvline{} & & & \\ \cline{1-1}
\end{array} ,
\hspace{8ex}
\begin{array}{|ccccc} \hline
1 & \svline{4} & \svline{6} & \svline{8} & \svline{9} \\ \hline
2 & \svline{5} & \svline{7} & & \\ \cline{1-3}
3 & \ssvline{} & & & \\ \cline{1-1}
\end{array}
\end{equation}
\end{center}
When $T$ is numbered vertically we call the matrix defined by (\ref{jordan_matrix}) the
{\bf Jordan second canonical form}.
To write the matrix explicitly, let $n=\nu_1 + \cdots + \nu _p$ be the dual partition of $n=n_1+ \cdots + n_r$.
In other words $\nu _i$ is the number of boxes of the $i$th column of $T(n_1, \cdots, n_r)$.
Let $I_i$ be the $\nu_i \times \nu_{i+1}$ matrix
\begin{equation}
I_{i}=\begin{array}{cl} \\ \cline{1-1}
\svline{E} & \} \nu_{i+1} \\ \cline{1-1}
\svline{O} & \} \nu_{i}-\nu_{i+1} \\ \cline{1-1}
\underbrace{}_{\nu_{i+1}}
\end{array}
\end{equation}
where $E$ is the $\nu_{i+1} \times \nu_{i+1}$ identity and $O$ the $(\nu_{i}-\nu_{i+1}) \times \nu_{i+1}$ zero matrix.
Then the Jordan second canonical form for $T=T(n_1, n_2, \cdots, n_r)$ is the matrix
\begin{equation} \label{jordan_2nd_canonical_form}
\begin{array}{ccccccc} \\ \cline{1-6}
\svline{O} & \svline{I_{1}} & \svline{O} & \svline{\cdots} &\svline{O}& \svline{O} & \} \nu_1 \\ \cline{1-6}
\svline{O} & \svline{O} & \svline{I_{2}} & \svline{\cdots} &\svline{O}& \svline{O} & \} \nu_2 \\ \cline{1-6}
\svline{O} & \svline{O} & \svline{O} & \svline{\cdots} &\svline{O}& \svline{O} & \} \nu_3 \\ \cline{1-6}
\svline{\vdots} & \svline{\vdots} & \svline{\vdots} & \svline{\ddots} & & \svline{\vdots} & \vdots \\ \cline{1-6}
\svline{O} & \svline{O} & \svline{O} & \svline{\cdots} &\svline{O}& \svline{I_{p-1}} & \} \nu_{p-1} \\ \cline{1-6}
\svline{O} & \svline{O} & \svline{O} & \svline{\cdots} &\svline{O} & \svline{O} & \} \nu_p \\ \cline{1-6}
\underbrace{}_{\nu_1} &\underbrace{}_{\nu_2} &\underbrace{}_{\nu_3} & \ldots & & \underbrace{}_{\nu_p}
\end{array}
\end{equation}
When $T$ is numbered horizontally the matrix is usually called the Jordan canonical form.
If it is necessary to distinguish from the second, we call it the Jordan first canonical form.
In our previous paper \cite{tHjW03} we used the term ``Jordan second canonical form''
in a slightly different sense from the use here.
For our convenience we state some notational conventions.
\begin{itemize}
\item
$T=T(n_1, n_2, \cdots, n_r)$ indicates that $T$ is a Young diagram with $r$ rows, where $n_i$ is the number of
boxes of the $i$th row. It is assumed that
$n_1 \geq n_2 \geq \cdots \geq n_r > 0$.
$T=\whT(\nu _1, \cdots, \nu _p)$ denotes the Young diagram with
the integer $\nu _i$ as the number of boxes in the $i$th column.
Thus, for example, $T(5,3,1)=\whT(3,2,2,1,1)$.
\item
If $T=T(n_1, \cdots, n_r)=\whT(\nu_1, \cdots, \nu _p)$, then
$n=n_1 + \cdots +n _r$ and $n=\nu _1 + \cdots +\nu _p$ are dual partitions to each other.
\item
When we say that $n=n_1 + \cdots + n_r$ is a partition of $n$,
it is not assumed that the terms are arranged in either decreasing or increasing
order, but it is assumed that each term is positive.
\item
When we know that the sequence in a partition $n=n_1 + \cdots + n_r$ is put in decreasing order,
we use the term ``dual partition'' for $\whT(n_1, \cdots, n_r)$, identifying it with the Young diagram.
\item
When $T=T(n_1, \cdots, n_r)$, sometimes $T$ is referred to as a sequence
or a partition in the obvious sense.
\end{itemize}
\subsection{The linear hull of a generic exponential matrix}
Until Proposition~1 begins, we assume that ${\rm char}\, K=0$. Let $J \in \bM(n)$. As is well known,
the exponential of $J$ is the following:
$${\displaystyle \exp(J)=E+J+\frac{1}{2!}J^2+\frac{1}{3!}J^3+ \cdots }$$
We are interested in the linear hull of the set $\{ \exp(xJ) | x \in K \}$, where $J$ is a nilpotent matrix.
Suppose for the moment that $J$ is a single $n \times n$ Jordan
block of a nilpotent matrix, so
$$J=
\left( \begin{array}{rrrrr}
0 & 1 & 0 & \cdots & 0 \\
0 & 0 & 1 & \cdots & 0 \\
&\ddots & & & \\
0 & 0 & 0 & \cdots & 1 \\
0 & 0 & 0 & \cdots & 0
\end{array}
\right).
$$
Then
$$\exp(xJ)=
\left( \begin{array}{lllll}
1 & x & x^2 /2! & \cdots & x^{n-1}/(n-1)! \\
0 & 1 & x & \ddots & \vdots \\
0 & 0 & 1 & \ddots & x^2/2! \\
0 & 0 & 0 & \ddots & x \\
0 & 0 & 0 & 0 & 1
\end{array}
\right).
$$
Thus the linear hull of $ \{ \exp(xJ) | x \in K \}$
is the vector space spanned by $E, J, J^2, J^3, \cdots, J^{n-1}$.
If $x$ is an indeterminate, we say
$\exp(xJ)$ is a {\bf generic exponential } of $J$.
By an {\bf augmented exponential} we mean a matrix of type either $(O|\exp (J))$ or
its vertical version,
$$\left( \begin{array}{c}
\exp (J) \\ \hline
O
\end{array}
\right),
$$
where $O$ is a zero block of an arbitrary size as long as it fits $\exp(J)$.
If $J$ is a single Jordan cell of a nilpotent matrix,
the linear hull of an
augmented generic exponential of $J$ is the vector space
consisting of matrices which are one of the following types.
\begin{equation} \label{lower_cell}
\left( \begin{array}{lllllll}
0 & 0 & 0 & x_{0} & x_{1} & x_{2} & x_{3} \\
0 & 0 & 0 & 0 & x_{0} & x_{1} & x_{2} \\
0 & 0 & 0 & 0 & 0 & x_{0} & x_{1} \\
0 & 0 & 0 & 0 & 0 & 0 & x_{0}
\end{array}
\right)
\end{equation}
\begin{equation} \label{upper_cell}
\left( \begin{array}{llll}
x_{0} & x_{1} & x_{2} & x_{3} \\
0 & x_{0} & x_{1} & x_{2} \\
0 & 0 & x_{0} & x_{1} \\
0 & 0 & 0 & x_{0} \\
0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0
\end{array}
\right)
\end{equation}
\subsection{The commutator algebra of a nilpotent matrix as a set}
For $J \in \bM(n)$ we denote by $\C(J)$ the commutator algebra of $J$, namely
$$\C(J)=\{X \in \bM(n) | XJ=JX\}.$$
Note that $\C(J)$ is an associative algebra with identity.
Let $T=T(n_1, n_2, \cdots, n_r)$ be a Young diagram of size $n$ numbered horizontally
and let $J \in \bM(n)$ be the Jordan canonical matrix of partition $(n_1, n_2, \cdots, n_r)$, so $J$
is the matrix defined by the equation~(\ref{jordan_matrix}). Let $J_i$ be the $i$th diagonal block of $J$,
namely, $J_i$ is the Jordan cell of size $n_i$.
For the moment let us write $\expo(J_i)$ for $\exp(J_i)$ augmented by $O$.
Recall that $\expo(J_i)$ is determined by its size.
(For definition see Section~2.2.)
Introduce letters $x_{ij}$ as many as the number of pairs
$(i,j)$ for $1 \leq i,j \leq r$,
and define, for each pair $(i,j)$, the matrix $X_{ij}$ of
size $n_i \times n_j$
as follows:
\begin{equation} \label{def_of_X_ij}
X_{ij}=\left\{
\begin{array}{l}
\expo(x_{ij}J_j) \mbox{ if $i \leq j$, } \\
\expo(x_{ij}J_i) \mbox{ if $i > j$.}
\end{array}
\right.
\end{equation}
Put
\begin{equation}\label{def_of_X}
X=(X_{ij})
\end{equation}
by which we mean the $n \times n$ block matrix with blocks $X_{ij}$ defined above.
The following lemma was proved by Turnbull and Aitken~\cite{hTaA32} and also by Gantmacher~\cite{fG59}.
The proof here is due to Basili~\cite{rB03}.
\begin{lemma}[Gantmacher\cite{fG59}, Turnbull and Aitken\cite{hTaA32}] \label{description_of_C}
The set $\C(J)$ coincides with the linear hull of the set
$\{ X \in \bM(n) | x_{ij} \in K \}$ as defined in $(\ref{def_of_X_ij})$ and $(\ref{def_of_X})$.
\end{lemma}
{\em Proof.}
Suppose $M$ is an $n \times n$ matrix.
Let $M=(M_{pq})$ be the block decomposition shown in the picture below.
\begin{equation} \label{general_block_matrix}
M=
\begin{array}{ccccc} \\
\cline{1-4}
\svline{M_{11}} & \svline{M_{12}} & \svline{\cdots} & \svline{M_{1r}} & \} n_1 \\ \cline{1-4}
\svline{M_{21}} & \svline{M_{22}} & \svline{\cdots} & \svline{M_{2r}} & \} n_2 \\ \cline{1-4}
\svline{\vdots} & \svline{\vdots} & \svline{\ddots} & \svline{\vdots} & \vdots \\ \cline{1-4}
\svline{M_{r1}} & \svline{M_{r2}} & \svline{\cdots} & \svline{M_{rr}} & \} n_r \\ \cline{1-4}
\underbrace{}_{n_1} &\underbrace{}_{n_2} & \ldots & \underbrace{}_{n_r}
\end{array}
\end{equation}
Then one sees that
the condition $MJ=JM$ means that $M_{pq}J_q=J_pM_{pq}$ for all $1 \leq p,q \leq r $.
Thus the assertion follows from Lemma~\ref{commute} below.
\begin{lemma} \label{commute}
Let $Z=(z_{ij})$ be a $p \times q$ matrix.
Let $J_1$ and $J_2$ be the Jordan cells of sizes $p$ and $q$
respectively.
\begin{enumerate}
\item[$(i)$] If $p \geq q$,
then $J_1Z=ZJ_2$ implies that
$z_{21}=z_{31}=\cdots = z_{p1}=0$ and
$z_{ij}=z_{(i+1)(j+1)}$ for all $i,j$ such that
$1 \leq i \leq p-1$ and $1 \leq j \leq q-1$.
\item[$(ii)$] If $p < q$,
then $J_1Z=ZJ_2$ implies that
$z_{p1}=z_{p2}=\cdots = z_{p(q-1)}=0$ and
$z_{ij}=z_{(i+1)(j+1)}$ for all $i,j$ such that
$1 \leq i \leq p-1$ and $1 \leq j \leq q-1$.
\end{enumerate}
\end{lemma}
{\em Proof } is straightforward.
\begin{remark} \label{small_remark}
{\rm
\begin{enumerate}
\item[$(i)$]
A generic matrix $X$ in $\C(J)$ decomposes as $X=(X_{ij})$, where each block $X_{ij}$ is of the type
shown in (\ref{lower_cell}) for $i \geq j$ and in (\ref{upper_cell}) for $i < j$.
Entries in the last columns in the blocks $X_{ij}$ for $i \geq j$
and those in the first rows in $X_{ij}$ for $i < j$ are algebraically independent.
\item[$(ii)$]
As before let $(n_1, \cdots, n_r)$ be the partition of the nilpotent matrix $J$ put in the Jordan canonical form.
Put $A=K[z]/(z^{n_1})$ and $V =\bigoplus _{i=1} ^{r} K[z]/(z^{n_i})$. Regard $V$ as an $A$-module.
Then we may use monomials as a basis of $V$ so that $J$ is the matrix for the multiplication map
\[
\times z : V \ra V.
\]
It is not difficult to see that $\C(J)$ coincides with $\End_A(V)$. Details are left to the reader.
In this paper this is used only to indicate another proof of Theorem~\ref{main_theorem}.
(See the last paragraph preceding Theorem~\ref{main_theorem}.)
\end{enumerate}
\end{remark}
\begin{example} \label{example_332}
{\rm
Let $T=T(3,3,2)$. Then a general element of $\C(J)$ is of the form
\begin{equation}
\left( \begin{array}{ccc|ccc|cc}
a & a' & a'' & b & b' & b'' & c & c' \\
0 & a & a' & 0 & b & b' & 0 & c \\
0 & 0 & a & 0 & 0 & b & 0 & 0 \\ \hline
d & d' & d'' & e & e' & e'' & f & f' \\
0 & d & d' & 0 & e & e' & 0 & f \\
0 & 0 & d & 0 & 0 & e & 0 & 0 \\ \hline
0 & g & g' & 0 & h &h' & i & i' \\
0 & 0 & g & 0 & 0 &h & 0 & i
\end{array}
\right)
\end{equation}
}
\end{example}
\begin{example}
{\rm
Let $T=T(4,2,1)$. Then a general element of $\C(J)$ is of the form
\begin{equation}
\left( \begin{array}{cccc|cc|c}
a & a' & a'' & a''' & b & b' & c \\
0 & a & a' & a'' & 0 & b & 0 \\
0 & 0 & a & a' & 0 & 0 & 0 \\
0 & 0 & 0 & a & 0 & 0 & 0 \\ \hline
0 & 0 & d & d' & e & e' & f \\
0 & 0 & 0 & d & 0 & e & 0 \\ \hline
0 & 0 & 0 & g & 0 & h & i
\end{array}
\right)
\end{equation}
}
\end{example}
\section{The structure of the commutator algebra of a nilpotent matrix}
To say anything about the structure of the algebra $\C(J)$ we need to look at the
Young diagram $T$ for $J$ more in detail.
Throughout this section we fix $T$ and $J$ as follows:
\begin{enumerate}
\item
$T=T(n_1, \cdots, n_r)$ is a Young diagram of size $n$.
\item
$J$ is the Jordan canonical form of a nilpotent matrix of type $T$.
\end{enumerate}
Let $(f_1, f_2, \cdots, f_s)$ be the finest subsequence of $(n_1, \cdots, n_r)$ such that
$f_1 > f_2 > \cdots > f_s > 0$. Then we can rewrite the same sequence
$(n_1, \cdots, n_r)$ as
\begin{equation} \label{grand_block_decomposition}
(n_1, \cdots, n_r)=(\underbrace{f_1 , \cdots f_1}_{m_1}, \underbrace{f_2, \cdots, f_2}_{m_2}, \cdots ,
\underbrace{f_s, \cdots, f_s}_{m_s}).
\end{equation}
The integer $m_j$ is the multiplicity of $f_j$. Let us call
$m_1, \cdots, m_s$ the
{\bf multiplicity sequence} of $T(n_1, \cdots, n_r)$. Note that it
gives us a partition of the number $r$ of rows of $T$, namely, $r=m_1+m_2+ \cdots + m_s$.
Recall that the Jacobson radical of a ring is defined to be the intersection of
$${\rm ann}(M)$$ where ${\rm ann}(M)$
denotes the annihilator of the module $M$ and
$M$ runs over all simple (right) modules.
The Jacobson radical is a two sided nilpotent ideal and
if it is $0$ then the ring is said to be semisimple. (See e.g., early pages of Herstein~\cite{iH68}.)
The following is a known result.
E.g., this can be deduced from \cite[Theorem~3.5.2]{yDvK93} with the identification of $\C(J)$ with
$\End_A(V)$, where $A=K[z]/(z^{n_1})$ as described in Remark~\ref{small_remark}~$(ii)$.
We give a direct proof after Proposition~\ref{key_proposition} and Example~\ref{ex_322}
where we show a number of properties of
a generic matrix of $\C(J)$.
\begin{theorem} \label{main_theorem}
Let $\C(J) \subset \bM(n)$ be the commutator algebra of $J$.
Let $m_1, m_2, \cdots, m_s$ be the multiplicity sequence of $T$.
Let $\rho$ be the Jacobson radical of $\C(J)$. Then
there is a surjective homomorphism
$$\Phi : \C(J) \ra \bM(m_1) \times \bM(m_2) \times \cdots \times \bM(m_s)$$
with $\ker \: \Phi = \rho$.
\end{theorem}
Proof is postponed to the end of Definition~\ref{def_diagonal_blocks}.
The map $\Phi$ is defined in (\ref{big_phi}).
Let $M \in \bM(n)$. Using the partition $n=n_1+ \cdots +n_r$
we decompose $M$ into blocks as indicated in (\ref{general_block_matrix}).
When $M \in \bM(n)$ is considered as a block matrix in this way for a Young diagram
$T=T(n_1, \ldots, n_r)$,
we write $M=(x^{(kl)}_{ij})$,
by which we mean that the element $x^{(kl)}_{ij}$ is the $(i,j)$-entry
of the $(k,l)$-block of $M$.
Note that $M$ has square diagonal blocks.
For $M=(x^{(kl)}_{ij})$ we define the matrix $\whM$ by
$$\whM=(x^{(ij)}_{kl}).$$ The matrices
$M$ and $\whM$ differ only by a certain permutation of rows and columns.
To name the permutation explicitly, let $T_h$ and $T_v$ be the same Young diagram with
the horizontal and vertical numberings respectively.
Let $\pi: T_h \ra T_v$ be the permutation of the integers $\{1,2 ,\cdots, n\}$ which,
as in the picture below, claims that
if a box is numbered $i$ in $T_h$ then the same box is numbered $\pi(i)$ in $T_v$.
\begin{center}
\begin{tabular}{|ccccc} \hline
1 & \svline{2} & \svline{3} & \svline{4} & \svline{5} \\ \hline
6 & \svline{7} & \svline{8} & & \\ \cline{1-3}
9 & \ssvline{} & & & \\ \cline{1-1}
\end{tabular} \hspace{2ex} $\stackrel{\pi}{\longrightarrow}$ \hspace{2ex}
\begin{tabular}{|ccccc} \hline
1 & \svline{4} & \svline{6} & \svline{8} & \svline{9} \\ \hline
2 & \svline{5} & \svline{7} & & \\ \cline{1-3}
3 & \ssvline{} & & & \\ \cline{1-1}
\end{tabular}
\end{center}
Let $P=(p_{ij})$ be the matrix defined by
\begin{equation} \label{permutation_matrix}
p_{ij}=\left\{\begin{array}{l}
1 \ \mbox{ if $j=\pi(i)$,}
\\
0 \ \mbox{ otherwise.}
\end{array}
\right.
\end{equation}
Then one sees easily that $\whM=P^{-1}MP$ for $M \in \C(J)$.
Notice that the indices $i$ and $j$ of blocks of $\whM=(a^{(ij)} _{kl})$ run over 1 through $n_1$,
since $n_1$ is the size of the biggest block of $M=(a^{(kl)}_{ij})$.
Let $n=\nu_1+ \cdots + \nu _p$ be the dual partition of
$n=n_1+ \cdots + n_r$. (So $\nu _i$ is the number of boxes of
the $i$th column of $T$.)
Then one sees with a little contemplation that
the size of the $(i,j)$-block of $\whM$ is $\nu_i \times \nu_j$.
We are interested in the diagonal blocks of $\whM$. So we introduce a definition.
\begin{definition} \label{en_sub_i}
{\rm
For a Young diagram $T=T(n_1, \cdots, n_r)$ and a block matrix $M=(a_{ij}^{(kl)})$ as in $($\ref{general_block_matrix}$)$ above,
we define
$$N_i$$
to be the $i$th diagonal block of $\whM$
for $ i=1,2,\cdots, p$. Note that $N_i$ is a square matrix of size $\nu _i$, where $(\nu_1, \cdots, \nu_p)$
is the dual partition of $T=T(n_1, \cdots, n_r)$.
}
\end{definition}
Recall that $J$ is the $n \times n$ Jordan matrix with partition $T=T(n_1, \cdots, n_r)$.
\begin{proposition} \label{key_proposition}
Using the notation above we have:
\begin{enumerate}
\item[$(i)$]
For $M \in \C(J)$, the matrix
$\whM$ is block upper triangular. Namely, if $i > j$ the
$(i,j)$-block of $\whM$ is O.
\item[$(ii)$]
$\whJ$ is the Jordan second canonical form of $T$.
\item[$(iii)$]
Let $\C(\whJ) \subset \bM(n) $ be the commutator algebra of $\whJ$.
Then the map $\C(J) \ra \C(\whJ)$ defined by
\[
M \mapsto \whM
\] is
a natural isomorphism of algebras.
\item[$(iv)$]
Let $T'=T(n_1-1, n_2-1, \cdots, n_r-1)$,
and let $J'$ be the Jordan canonical matrix with partition $T'$.
Then
$\whM$ decomposes as
$$\whM=
\begin{array}{|c|c|} \hline
N_1 & \ast \\ \hline
O & \whM' \\ \hline
\end{array},
$$
where $M' \in \C(J')$.
\item[$(v)$]
If $\nu_1=\nu_2$, then $N_1=N_2$ for every $M \in \C(J)$.
{\rm $($For the definition of $N_i$ see
Definition~\ref{en_sub_i}.$)$}
\item[$(vi)$]
If $\nu_1 > \nu_2$, then $N_1$ decomposes as follows:
\begin{equation} \label{decomposition_of_M_hat}
N_1=
\begin{array}{ccl} \\
\cline{1-2}
\svline{N_{2}} & \svline{G'} & \} r-m_s \\ \cline{1-2}
\svline{O} & \svline{G} & \} m_s \\ \cline{1-2}
\underbrace{}_{r-m_s} & \underbrace{}_{m_s}
\end{array}
\end{equation}
Furthermore all entries of $G$ are algebraically independent of
any other entry of $M$ if $M$ is generic in $\C(J)$.
\item[$(vii)$]
$N_1$ decomposes as:
\begin{equation} \label{diagonal_N_1}
N_1=
\begin{array}{cccccc} \\ \cline{1-5}
\svline{G_{1}} & \svline{\ast} & \svline{\ast} &\svline{\ast}& \svline{\ast} & \} m_1 \\ \cline{1-5}
\svline{O} & \svline{G_{2}} & \svline{\ast} &\svline{\ast}& \svline{\ast} & \} m_2 \\ \cline{1-5}
\svline{O} & \svline{O} & \svline{G_3} &\svline{\ast}& \svline{\ast} & \} m_3 \\ \cline{1-5}
\svline{\vdots} & \svline{\vdots} & \svline{\vdots} & \svline{\ddots} & \svline{\vdots} & \vdots \\ \cline{1-5}
\svline{O} & \svline{O} & \svline{O} &\svline{\cdots}& \svline{G_{s}} & \} m_s \\ \cline{1-5}
\underbrace{}_{m_1} &\underbrace{}_{m_2} & \underbrace{}_{m_3} & \ldots & \underbrace{}_{m_s}
\end{array}
\end{equation}
\end{enumerate}
\end{proposition}
{\em Proof.}
$(i)$ Suppose $i>j$. Let $x^{(ij)}_{kl}$ be the $(k,l)$-entry
of the $(i,j)$-block of $\whM$. Originally it is the
$(i,j)$-entry of the $(k,l)$-block of $M$.
In Lemma~\ref{description_of_C} we showed that
each block is ``upper triangular.'' Hence $x^{(ij)}_{kl}=0$ for any $k,l$. \newline
$(ii)$ Left to the reader. \newline
$(iii)$ With $P$ as defined by
(\ref{permutation_matrix}), we have $\whM=P^{-1}MP$. Hence the assertion follows. \newline
$(iv)$ This is not difficult to see. Details are left to the reader. \newline
$(v)$ That $\nu _1=\nu _2$ means that each block
of $M$ has at least two rows and two columns. Hence every block
of $M$ has the $(2, 2)$ entry and moreover
$x^{(kl)}_{11}=x^{(kl)}_{22}$ for all $(k,l)$. This shows that
$N_1=N_2$. \newline
$(vi)$ That $\nu _1 > \nu _2$ means that $\nu _1 - \nu _2=m_s$ and that
$f_s=1$. So there are $m_s ^2$ blocks of size $1 \times 1$ in $M$.
If we write $M$ with variable entries, then these blocks consist of algebraically independent elements by
Remark~\ref{small_remark}. This implies that the entries of $G_s$ consist of algebraically independent elements and
are algebraically independent of any other entries of $M$.
\newline
$(vii)$
In view of $(iv)$ the assertion follows inductively from $(v)$ and $(vi)$.
\begin{example} \label{ex_322}
{\rm
Let $T=T(3,2,2)$. Then a general element of $\C(J)$ is:
\begin{equation}
M=\left( \begin{array}{ccc|cc|cc}
a & a' & a'' & b & b' & c &c' \\
0 & a & a' & 0 & b & 0 &c \\
0 & 0 & a & 0 & 0 & 0 &0 \\ \hline
0 & d & d' & e & e' & f & f' \\
0 & 0 & d & 0 & e & 0 & f \\ \hline
0 & g & g' & h & h' & i & i' \\
0 & 0 & g & 0 & h & 0 & i
\end{array}
\right)
\end{equation}
By a certain permutation it becomes $\whM$ as follows:
\begin{equation}
\whM=\left( \begin{array}{ccc|ccc|c}
a & b & c & a' & b' & c' & a'' \\
0 & e & f & d & e' & f' & d' \\
0 & h & i & g & h' & i' & g' \\ \hline
0 & 0 & 0 & a & b & c & a' \\
0 & 0 & 0 & 0 & e & f & d \\
0 & 0 & 0 & 0 & h & i & g \\ \hline
0 & 0 & 0 & 0 & 0 & 0 & a
\end{array}
\right)
\end{equation}
Note that the block decomposition of $\whM$ gives us the dual partition $7=3+3+1$.
The first diagonal block of $\whM$ is the matrix
$N_1=\left( \begin{array}{c|cc}
a & b & c \\ \hline
0 & e & f \\
0 & h & i
\end{array}
\right)$. The second $N_2$ is identical with $N_1$. The third $N_3=(a)$.
}
\end{example}
\begin{definition} \label{def_diagonal_blocks}
{\rm
With the notation made in Definition~\ref{en_sub_i}, we call the sequence of matrices
$$(N_1, N_2, \cdots, N_p)$$
the {\bf coarse diagonal blocks} of $\whM$.
We call the sequence $(G_1, \cdots, G_s)$ the diagonal blocks of
$N_1$. We apply the term analogously to all $N_i$. Hence the diagonal blocks of
$N_i$ are $(G_1, G_2, \cdots, G_t)$ with a
certain $t$ depending on $i$.
By the {\bf fine diagonal blocks} of $\whM$ we mean the totality of the diagonal blocks:
\begin{equation} \label{coarse_diagonal_block}
(\mbox{\rm diag}(N_1), \mbox{\rm diag}(N_2), \cdots , {\rm diag}(N_p))
\end{equation}
}
\end{definition}
{\em Proof}\ of Theorem~\ref{main_theorem}.
Let $M \in \C(J)$. Let $N_1, N_2, \cdots, N_p$ be the coarse diagonal blocks of
$\whM$, and $G_1, \ldots, G_s$ be the diagonal blocks of $N_1$.
Define the map
\begin{equation} \label{big_phi}
\Phi: \C(J) \ra \bM(m_1) \times \cdots \times \bM(m_s)
\end{equation}
by
$\Phi(M)=(G_1, \cdots, G_s)$.
By Proposition~\ref{key_proposition} $(vi)$ and $(vii)$
it is easy to see that $\Phi$ is surjective and also that the kernel of $\Phi$
is nilpotent.
Since $\bM(m_1) \times \cdots \times \bM(m_s)$ is semisimple, the proof of Theorem~\ref{main_theorem}
is complete.
\begin{remark}
{\rm
The matrices $G_{i}$ defined above are precisely the same as $\bar{A}_{\alpha \beta}$ with $\alpha=\beta=i$
in Basili~\cite[p.60]{rB03}.
Basili shows that $M$ is nilpotent if and only if each $\bar{A}_{ii}$ is nilpotent.
This is particularly obvious after the proof of Theorem~\ref{main_theorem}. Thus we recover Basili's identification
of the nilpotent commutator with the inverse image under $\Phi$ of the locus where the $\bar{A}_{ii}$ or $G_{i}$ are
nilpotent.
}
\end{remark}
In the next proposition we would like to redefine $\Phi$
in a coordinate free manner.
Let $V$ be a finite dimensional vector space over $K$.
We use the same letter $J$ as before to denote a nilpotent element of $\End(V)$.
The notation $\C(J)$ is used in the same meaning as for the matrix.
Namely $\C(J)= \{ M \in \End(V) | MJ=JM \}$.
Note that the subspaces $\ker J^i$ and $\im J^i$ of $V$ are $\C(J)$-modules for every
integer $i$, and so are their sums and intersections.
Let $p$ be the least integer such that $J^p =0$.
(To avoid the trivial case we assume $p>1$.)
We have a descending chain of subspaces:
\[
V=\ker J^{p} + \im J \supset \ker J^{p-1} + \im J \supset \ker J^{p-2}+ \im J \supset \cdots \supset \ker J^{0}+\im J=\im J
\]
From among the sequence of successive quotients
$$(\ker J^{p-i} + \im J)/(\ker J^{p-i-1}+\im J)$$ for $i=0,1, \cdots, p-1$,
pick the non-zero vector spaces and rename them
\begin{equation} \label{definition_of_U}
U_1, U_2, \cdots, U_s
\end{equation}
Note that $U_1=V/(\ker J^{p-1}+ \im J)$. In fact everything may be regarded as
a module over $K[J]$, which is a commutative local ring.
So we may use Nakayama's Lemma to see $V \not = (\ker J^{p-1}+ \im J)$.
Likewise consider the ascending chain of subspaces:
$$\label{definition_of_W}
0=\im J^{p} \cap \ker J \subset \im J^{p-1} \cap \ker J \subset \im J^{p-2} \cap \ker J \subset \cdots \subset \im J \cap \ker J \subset \im J^{0} \cap \ker J = \ker J
$$
Let
\begin{equation}
W_1, W_2, \cdots, W_{s'}
\end{equation}
be the subsequence of non-zero terms of the successive quotients
$$(\im J^{p-i-1} \cap \ker J)/(\im J^{p-i} \cap \ker J) \mbox{ for } i=0,1, \cdots , p-1$$
(We note $W_1=\im J^{p-1} \cap \ker J$.)
The spaces $U_i$ and $W_i$ are $\C(J)$-modules. Hence the module structure induces the
algebra homomorphisms $\phi _i: \C(J) \ra \End(U_i)$ for $i=1,2, \cdots, s$.
Define the algebra homomorphism
\begin{equation} \label{small_phi}
\phi : \C(J) \ra \End(U_1) \times \End(U_2) \times \cdots \times \End(U_s)
\end{equation}
by the concatenation $\phi= (\phi _1, \cdots, \phi _s)$ of all $\phi _i : \C(J) \ra \End(U_i)$.
Similarly define the algebra homomorphism
\begin{equation} \label{small_phi_2}
{\phi}' : \C(J) \ra \End(W_1) \times \End(W_2) \times \cdots \times \End(W_{s'})
\end{equation}
by the concatenation of ${\phi _i}' : \C(J) \ra \End(W_i)$.
Now we have
\begin{proposition} \label{central_module}
\begin{enumerate}
\item[$(i)$]
$s=s'$
\item[$(ii)$]
$\dim U_i =\dim W_i$ for $i=1,2, \cdots, s$
\item[$(iii)$]
If we identify $\End(U_i)$ and $\End(W_i)$ with a full matrix ring using suitable bases, then the homomorphism
$\Phi$ defined in $(\ref{big_phi})$
coincides with $\phi$ in $(\ref{small_phi})$ and with ${\phi'}$ in $(\ref{small_phi_2})$.
\end{enumerate}
\end{proposition}
{\em Proof.} Let $n=\dim V$.
Let $B$ be a basis of $V$ in which $J$ is put in the Jordan first canonical form. Once and for all
we fix such a basis and we identify $\End(V)$ and $\bM(n)$.
Suppose that the matrix for $J$ decomposes into Jordan cells as denoted by the
Young diagram $T=T(n_1, n_2, \cdots, n_r)$.
We index the boxes of $T$ by
the basis elements of $V$ themselves in such a way that it satisfies the following condition:
\[
e, e' \in B \mbox{ and } e'=Je \Leftrightarrow \mbox{ the box $e'$ is next to and on the right of $e$.}
\]
(cf. Equation~(\ref{jordan_matrix}) of Section~2.1.)
Here the ``box $e$'' means that the box is indexed by $e$. Henceforth
we use the words ``box in $T$'' and a ``basis element'' in $B$ interchangeably.
Let $(m_1, m_2, \cdots, m_{s''})$ be the multiplicity sequence of $T$ so we have, by (\ref{grand_block_decomposition})
in Section~3,
$$
(n_1, \cdots, n_r)=(\underbrace{f_1 , \cdots f_1}_{m_1}, \underbrace{f_2, \cdots, f_2}_{m_2}, \cdots ,
\underbrace{f_{s''}, \cdots, f_{s''}}_{m_{s''}}).
$$
Let us call the set of boxes in $T$ a ``rectangle'' if it consists of all rows of the same length $f_i$
for some $i$. Let $T_i$ be the $i$th rectangle.
(So $T_i$ consists of $m_i \times f_i$ boxes.)
We may write $T=T_1 \sqcup T_2 \sqcup \cdots \sqcup T_{s''}$ as a disjoint union of
Young subdiagrams aligned left.
Let $e$ be a basis element of $V$ in the first column of $T$ and let $e'$ be a basis element at the end of a row of $T$.
We have that
$$\mbox{the box $e$ belongs to }\ T_j
\Leftrightarrow \left\{
\begin{array}{l}
J^{k}e=0 \mbox{ if $k=f_j$,} \\
J^{k}e \not = 0 \mbox{ if $k < f_{j}$,}
\end{array}
\right.
$$
and
$$\mbox{the box $e'$ belongs to }\ T_j
\Leftrightarrow \left\{
\begin{array}{l}
e' \in \; \im J^k \mbox{ if $k=f_j-1$,} \\
e' \not \in \; \im J^k \mbox{ if $k > f_{j}-1$.}
\end{array}
\right.
$$
Thus the basis elements in the first column of $T_i$ span the space $U_i$
and those in the last column span $W_i$.
This shows $(i)$ and $(ii)$.
To prove $(iii)$, we have to show that for $M \in \C(J)$, both
$\phi_i(M)$ and ${\phi}'_i(M)$, for each $i$, are represented by the matrix $G_i$ defined in
Proposition~\ref{key_proposition} with a suitable basis for $V$.
Number the boxes of $T$ vertically.
Then one sees that
$\phi_i(M)$, for $M \in \C(J)$, corresponds to the
$i$th diagonal block $G_i$ of $N_1$ as was shown
in Proposition~\ref{key_proposition} $(vii)$.
Also it is not difficult to see that ${\phi}'_i(M) \in \End(W_i)$
corresponds to the last fine
diagonal block in $N_{f_{i}}$, which is also $G_i$.
This completes the proof.
\begin{remark} \label{central_simple_module}
{\rm
Let $J \in \End(V)$ be nilpotent and let
$U_1, \ldots, U_s$ and $W_1, \ldots, W_s$ be the vector spaces defined above.
Suppose that $T(n_1, \cdots, n_r)$ is the Jordan decomposition
for $J$. Let $(f_1, f_2, \cdots, f_s)$ be the finest subsequence
of $(n_1, \cdots, n_r)$ such that $f_1 > f_2 > \cdots > f_s >0$.
Then we have:
\begin{enumerate}
\item[$(i)$]
$U_i=(\ker J^{f_i} + \im J)/(\ker J^{f_i -1} + \im J) \mbox{ for $i=1,2,\cdots, s$. }$
\item[$(ii)$]
$W_i=(\ker J \cap \im J^{f_i-1})/(\ker J \cap \im J^{f_i}) \mbox{ for $i=1,2,\cdots, s$. }$
\end{enumerate}
These could have been the definitions of the vector spaces $U_i$ and $W_i$.
The statement of Proposition~\ref{central_module}~$(iii)$ means that $\phi _i$ and ${\phi}' _i$ are equivalent
in the sense that there are bijective linear maps $\psi _i: U_i \ra W_i$, which make the following
diagrams commutative
\def\haphi{\stackrel{\phi _{i}(X)}{\longrightarrow}}
\def\haphiprime{\stackrel{\phi '_{i}(X)}{\longrightarrow}}
\newcommand{\mapdown}[1]{\downarrow\rlap{$\vcenter{\hbox{$\scriptstyle #1\, $}}$ }}
\def\dapsi{{\mapdown{\psi_i}}}
\noindent
$${\displaystyle
\begin{array}{ccccccccc}
U_i & \haphi & U_i \\[1ex]
\dapsi & & \dapsi \\[1ex]
W_i & \haphiprime & W_i \\[1ex]
\end{array}
}
$$
for every $X \in \C(J)$.
This shows that $U_i$ and $W_i$ are isomorphic as modules over $\C(J)$.
If we take grading into account, we have the isomorphism
\[U_i[1-f_i]\cong W_i,\]
since the degrees of the boxes of the first column of $T_i$ are different
from those in the last column of $T_i$ by $f_i-1$ row-wise.
This isomorphism can be proved more directly as follows.
\def\raa{\stackrel{J^{i}}{\ra}}
\def\rab{\stackrel{J^{i-1}}{\ra}}
\def\daa{{\mapdown{J}}}
\def\dab{{\mapdown{J}}}
\noindent
Consider the diagram. (We write $0:J$ for $\ker J$.)
$${\displaystyle
\begin{array}{ccccccccccccccc}
& & 0 & & 0 & & & & \\[1ex]
& & \da & & \da & & & & \\[1ex]
0 & \ra & 0:J & \ra & 0:J & \ra & 0 & & \\[1ex]
& & \da & & \da & & \da & & \\[1ex]
0 & \ra & 0:J^{i} & \ra & 0:J^{i+1} & \raa & 0:J \cap \im J^{i} & \ra & 0 \\[1ex]
& & \daa & & \dab & & \da & & \\[1ex]
0 & \ra & 0:J^{i-1} & \ra & 0:J^{i} & \rab & 0:J \cap \im J^{i-1} & \ra & 0 \\[1ex]
& & \da & & \da & & \da & \\[1ex]
0 & \ra & 0:J^{i-1}/J(0:J^i) & \ra & 0:J^{i}/J(0:J^{i+1}) & \ra & X & \ra & 0 \\[1ex]
& & \da & & \da & & \da & & \\[1ex]
& & 0 & & 0 & & 0 & &
\end{array}
}
$$
\noindent
The definition of the maps should be self-explanatory.
Also note the isomorphisms
\[
\frac{0:J^i}{J(0:J^{i+1})} \cong \frac{0:J^{i}+\im J}{\im J}
\]
and the same for $i-1$ instead of $i$.
Then the last horizontal exact sequence shows that $X$ is isomorphic to $U_j$ if $i=f_j$.
But the last vertical
exact sequence shows that $X \cong W_j$. Thus we have $U_j \cong W_j$ for all $j$.
}
\end{remark}
\begin{remark} \label{new_remark}
{\rm
\begin{enumerate}
\item
$U_1, \cdots, U_s$ are simple $\C(J)$-modules of different isomorphism types, and these exhaust all
isomorphism types of simple $\C(J)$-modules.
\item
When $J$ is given as the multiplication map $\times z \in \End(A)$, $A$ an Artinian $K$-algebra,
we term the modules $U_1, \cdots, U_s$ the ``central simple modules'' of the pair $(A,z)$, and
study them further in \cite{tHjW06} and \cite{tHjW07}.
We say ``central simple'' in the sense it has no proper submodules
over the centralizer of $J$.
\item
Let $A$ be an Artinian Gorenstein $K$-algebra, not necessarily graded,
and let $z \in A$ be any non-unit element.
Let
$(f_1^{m_1}, \cdots, f_s^{m_s})$ be the partition for
the Jordan decomposition of the nilpotent element $\times z \in \End(A)$.
Then it is possible to define $U_1, \cdots, U_s, W_1, \cdots, W_s$ as
in Remark~\ref{central_simple_module}.
Since $A$ is Artinian Gorenstein, ${\rm Hom}_A({}\_, A)$ is an
exact functor. Hence we have the isomorphism
\[
{\rm Hom} _A (U_i, A) \cong W_i,
\mbox { and }
{\rm Hom} _A (W_i, A) \cong U_i.
\]
\noindent So
\[
{\rm Hom} _A (U_i, A) \cong U_i, \mbox{ and
} {\rm Hom} _A (W_i, A) \cong W_i.
\]
This explains the symmetry of the Hilbert function of $U_i$ shown in \cite[Proposition~4.6]{tHjW06} and
\cite[Proposition~5.3]{tHjW07}.
\end{enumerate}
}
\end{remark}
\begin{proposition} \label{conjugation}
Let $T$, $J$ and $\C(J)$ be the same as above. Given $M \in \C(J)$,
it is possible to put the fine diagonal blocks of $\whM$ in the Jordan
canonical form by conjugation without affecting the shape of $\whJ$.
\end{proposition}
{\em Proof.} We have to find an invertible matrix $H$ such that
$H^{-1}\whJ H=\whJ$ and
the fine diagonal blocks of $(H^{-1}\whM H)$ are Jordan first canonical forms. Let
$N_1, \cdots, N_p$ be the diagonal blocks as in the proof of Theorem~\ref{main_theorem}.
Let $G_1, \cdots, G_s$ be the diagonal blocks of $N_1$.
Let $F_i \in \bM(m_i)$ be an invertible matrix such that
$F^{-1}_iG_iF_i$ is the Jordan canonical form of $G_i$.
Let $H_1\in \bM(r)$ be the matrix which has $F_i$ as the $i$th diagonal
block and O off diagonal. Then $H_1$ puts the diagonal blocks of $N_1$ into
the Jordan canonical forms. In the same way define $H_i$ for $N_i$ for
each $1 \leq i \leq p$. Finally define $H \in \bM(n)$ so that it has $H_i$ as the
$i$th diagonal block and O off diagonal.
One sees easily that $H$ does not change the shape of $\whJ$, so it has the
desired property.
\begin{proposition} \label{rank_of_big_matrix}
Let $T=T(n_1, n_2, \cdots, n_r)$, $J$ and $\C(J)$ be the same as before.
Let $m_1, \cdots, m_s$ be the multiplicity sequence of $T$ so that we have
$$
T(n_1, \cdots, n_r)=T(\underbrace{f_1 , \cdots f_1}_{m_1}, \underbrace{f_2, \cdots, f_2}_{m_2}, \cdots ,
\underbrace{f_s, \cdots, f_s}_{m_s})
$$
where $f_1, \cdots f_s$ is the descending subsequence of $n_1, \cdots, n_r$.
Let $M \in \C(J)$ and let $N_1$ be the first coarse diagonal block of $\whM$.
Suppose that the diagonal blocks of $N_1$ are $(G_1, \cdots, G_s)$ and
moreover that all entries of $\whM$ are 0 except in the diagonals $N_1, \cdots, N_p$.
Then we have:
$$\mbox{\rm rank}\ (\whM + \whJ) \geq
\mbox{\rm rank }G_1^{f_1} + \mbox{\rm rank }G_2^{f_2} + \cdots + \mbox{\rm rank }G_s^{f_s} + \mbox{\rm rank }J.$$
Assume furthermore that all entries of $N_1$ are zero outside of the
diagonal blocks $G_1, \cdots, G_s$. Then we have:
$$\mbox{\rm rank}\ (\whM + \whJ) =
\mbox{\rm rank }G_1^{f_1} + \mbox{\rm rank }G_2^{f_2} + \cdots + \mbox{\rm rank }G_s^{f_s} + \mbox{\rm rank }J.$$
\end{proposition}
{\em Proof.}
First we prove the second assertion.
Assume $s=1$.
It means that $T=T(\underbrace{n_1, \cdots, n_1}_{r})$. In this case the matrix $\whM+\whJ$ has the form:
\begin{equation} \label{matrix2}
\begin{array}{ccccccc} \\ \cline{1-6}
\svline{N_1} & \svline{E} & \svline{O} & \svline{\cdots} &\svline{O} & \svline{O} & \} m \\ \cline{1-6}
\svline{O} & \svline{N_2} & \svline{E} & \svline{\cdots} &\svline{O} & \svline{O} & \} m \\ \cline{1-6}
\svline{O} & \svline{O} & \svline{N_3} & \svline{\cdots} &\svline{O}& \svline{O} & \} m \\ \cline{1-6}
\svline{\vdots} & \svline{\vdots} & \svline{\vdots} & \svline{\ddots} & \svline{\ddots} & \svline{\vdots} & \vdots \\ \cline{1-6}
\svline{O} & \svline{O} & \svline{O} & \svline{\cdots} &\svline{N_{p-1}} & \svline{E} & \} m \\ \cline{1-6}
\svline{O} & \svline{O} & \svline{O} & \svline{\cdots} &\svline{O} & \svline{N_p} & \} m \\ \cline{1-6}
\underbrace{}_{m} &\underbrace{}_{m} &\underbrace{}_{m} & \ldots & & \underbrace{}_{m}
\end{array}
\end{equation}
Here $G_1=N_1=N_2= \cdots =N_p$, and $p=n_1=f_1$ and $m=r$.
We are going to apply basic row and column operations to this
matrix so that the rank can be computed.
Use row operations by block so the matrix becomes:
\begin{equation}
\begin{array}{ccccccc} \\ \cline{1-6}
\svline{G} & \svline{E} & \svline{O} & \svline{\cdots} &\svline{O} & \svline{O} & \} m \\ \cline{1-6}
\svline{-G^2} & \svline{O} & \svline{E} & \svline{\cdots} &\svline{O} & \svline{O} & \} m \\ \cline{1-6}
\svline{G^3} & \svline{O} & \svline{O} & \svline{\cdots} &\svline{O}& \svline{O} & \} m \\ \cline{1-6}
\svline{\vdots} & \svline{\vdots} & \svline{\vdots} & \svline{\ddots} & \svline{\ddots} & \svline{\vdots} & \vdots \\ \cline{1-6}
\svline{\pm G^{p-1}} & \svline{O} & \svline{O} & \svline{\cdots} &\svline{O} & \svline{E} & \} m \\ \cline{1-6}
\svline{\mp G^p} & \svline{O} & \svline{O} & \svline{\cdots} &\svline{O} & \svline{O} & \} m \\ \cline{1-6}
\underbrace{}_{m} &\underbrace{}_{m} &\underbrace{}_{m} & \ldots & & \underbrace{}_{m}
\end{array}
\end{equation}
(We have put $G=G_1$.) Now use column operations to kill all the matrices in the blocks of the first column, except the
block at the bottom. Thus the rank is equal to $\rank \, G^{p} + m(f_1-1)$ and the assertion
of the proposition is proved for the case $s=1$.
Now assume $s >1$. Write $T$ as a disjoint union of rectangles, $T=T_1 \sqcup T_2 \sqcup \cdots \sqcup T_s$.
(It means that $T_i=T(\underbrace{f_i, \cdots, f_i}_{m_i})$ for each $i$.)
We proceed to the general case by induction on $s$.
As before let $N_1, N_2, \cdots, N_p$ be the coarse diagonal blocks of $\whM$ and let
$I_1, I_2, \cdots, I_{p-1}$ be the above diagonal blocks for $\whJ$. (These are
described as matrices (3) and (4) in Section~2.2.)
Recall that $I_i$ is a matrix of size $\nu_i \times \nu_{i+1}$.
If $\nu_i=\nu_{i+1}$, then $I_i$ is the identity matrix of that size
and if $\nu_i > \nu_{i+1}$ then it is the identity of size $\nu_{i+1}$
augmented by a zero block from below.
Also recall that the diagonal blocks of $N_1$ are $G_1, \cdots, G_s$ as shown in the figure~(\ref{diagonal_N_1}).
By Proposition~\ref{key_proposition}, $G_1$ is contained in $N_i$ for every $i$ as the first diagonal block.
Let us write $m:=m_1$ for the size of $G_1$. Let $E^{(i)}$ be the submatrix of $I_i$ consisting
of the first $m$ rows and $m$ columns. ($E^{(i)}$ is nothing but the identity matrix of size $m$.)
Notice that the $G_1$ appears $p$ times as
a fine diagonal block of $\whM$ (one time in every $N_i$), and that except the first
there is $E^{(i)}$ somewhere above $G_1$ in the same column block.
Now making row operations using $E^{(i)}$ it is possible to kill all $G_1$ of the same column block.
As in the case $s=1$ we are left with consecutive powers of $G_1$ in the first column block.
Now make column operations, using various $E^{(i)}$ to annihilate everything on the same row block.
Then as a result all $G_1$ disappear except the single $G^p$ at the bottom of the
first column block. Moreover from every $N_i$ the first row block disappears.
Now for the rows and columns that were not involved by the above procedure we may
apply the induction hypothesis for
$T(\underbrace{f_2, \cdots f_2}_{m_2} \cdots \underbrace{f_s, \cdots,f_s}_{m_s})$.
This completes the proof of the second statement. The first statement is proved similarly.
\section{Application to the theory of Artinian $K$-algebras}
Before we state our main theorem we introduce some definitions.
By dimension we will mean dimension as a $K$-vector space.
\begin{definition} \label{def_cosperner}
Let $A=\bigoplus _{i=0} ^c A_i$ be a graded Artinian $K$-algebra, where $A_0=K$ is a field and $A_c \not = 0$.
\begin{itemize}
\item
The algebra $A$ has the {\bf weak Lefschetz property} $(${\bf WLP}$)$ if there is a
linear element
$y \in A_1$ such that the multiplication
$\times y : A_i \rightarrow A_{i+1}$ is either injective or surjective
for all $i=0,1, \cdots, c-1$.
A {\bf weak Lefschetz element} is a linear element $y$ with this property.
\item
The algebra $A$ has the {\bf strong Lefschetz property} $(${\bf SLP}$)$ if there is a
linear element
$y \in A_1$ such that the multiplication
$\times y^{c-2i} : A_i \rightarrow A_{c-i}$ is bijective
for all $i=0,1, \cdots, [c/2]$.
We call a linear element $y$ with this property a {\bf strong Lefschetz element}.
\item
An element $y \in A_1$ is a {\bf general linear form} if there is a non-empty Zariski open set $U \subset A_1$,
such that $y$ has the same Jordan canonical form as every element $y' \in U$. (A general linear form exists if
$K$ is infinite.)
\item
The {\bf Sperner number} of $A$ is
${\rm Max}\{ {\rm dim }A_i | i=0,1,2 \cdots , c \}$.
\item
The {\bf CoSperner number} of $A$ is
${\displaystyle
\sum_{i=0}^{c-1} {\rm Min}\{
{\rm dim } A_i, {\rm dim } A_{i+1}
\}
}$.
\end{itemize}
\end{definition}
\begin{remark} \label{remarks_on_SLP}
{\rm
It is easy to see the following.
\begin{enumerate}
\item[$(i)$]
For any $y \in A$, the rank of $\times y$ does not exceed the CoSperner number of $A$.
\item[$(ii)$]
For any $y \in A$, the dimension of $A/yA$ is no less than the Sperner number of $A$.
\item[$(iii)$]
A linear form $y \in A$ is a weak Lefschetz element for $A$ if and only if
the rank of $\times y$ is equal to the CoSperner number of $A$.
\item[$(iv)$]
If the Hilbert function of $A$ is unimodal, then we have
$$\mbox{\rm Sperner} A+\mbox{\rm CoSperner} A=
\mbox{\rm dim} A.$$
\item[$(v)$]
If $A$ has the strong/weak Lefschetz property, then a general linear form is a strong/weak Lefschetz element.
\item[$(vi)$]
Suppose that $y \in A$ is a linear form and $T=T(n_1, \cdots, n_r)$ is the partition for the
nilpotent endomorphism $\times y \in \End(A)$. Then $y$ is a weak Lefschetz element if and only if
$r$, the number of the Jordan blocks of $\times y$, is equal to the Sperner number of $A$.
Also $y$ is a strong Lefschetz element if and only if the dual partition $\whT(\nu _1, \cdots, \nu _p)$ of $T$
is the one obtained from the unimodal Hilbert function of $A$. (cf. \cite[Lemma~3.7]{tHjW07}.)
\end{enumerate}
}
\end{remark}
We are going to apply Proposition~\ref{rank_of_big_matrix} to Artinian $K$-algebras to evaluate the rank of a
general linear form.
First we review some basic facts on the associated form ring of an Artinian algebra with respect to the
principal ideal generated by a linear form. This was motivated by the necessity to prove
Proposition~\ref{rank_of_deformation_2}.
Let $A$ be a graded Artinian $K$-algebra and let $z \in A_1$ be any linear
form of $A$.
Put
$$\Gr_{(z)}(A)=A/(z)\oplus (z)/(z^2) \oplus (z^2)/(z^3) \oplus \cdots \oplus (z^{p-1})/(z^p).$$
Here $p$ is the least integer such that $z^p =0$.
As is well known $\Gr_{(z)}(A)$ is endowed with a commutative ring structure.
The multiplication in $\Gr_{(z)}(A)$ is given by
$$
(a+(z^{i+1}))(b+(z^{j+1}))=ab+(z^{i+j+1}),
$$
where $a \in (z^i)$ and $b \in (z^j)$.
Note that $\Gr(A)$ inherits a grading from $A$ and
in this sense $\Gr(A)$ and $A$ have the same Hilbert function.
For a non-zero element $a \in A$ there is
$i$ such that $a \in (z^i) \setminus (z^{i+1})$.
In this case
we write $a^{*} \in \Gr(A)$ for the natural image of
$a$ in $(z^{i})/(z^{i+1})$.
Let $\times : A \ra \End(A)$ be the regular representation of the algebra $A$.
(So $\times a$ is the endomorphism of $A$ defined by $\times a(b)=ab$ for $a,b \in A$.)
Let $z$ be a linear form of $A$. Since $A$ is Artinian, $\times z$ is nilpotent.
Let $T=T(n_1, \cdots, n_r)$ be the Young diagram for the Jordan
canonical form of $\times z$.
One sees easily that the number $r$ of
parts of $T$ is equal to
${\rm dim}\ {\rm ker}[\times z: A \ra A] ={\rm dim}A/(z)$, since each Jordan cell of $\times z$
contributes 1 to the dimension of the kernel.
The Young diagram $T(n_1-1, n_2-1 , \cdots, n_r-1)$, with zeros deleted, corresponds to
the Jordan canonical form of the induced map $\times {\overline z} \in \End(A/(0:z))$.
Thus, inductively, it follows that
the dual of the partition $T(n_1, \cdots, n_r)$ is
$T(\nu_1, \nu_2, \cdots, \nu_p)$, with the integers
$\nu _i ={\rm dim}(z^{i-1})/(z^{i})$.
Let $\bB \subset A$ be a $K$-basis of $A$ in which
$\times z$ is written as a Jordan canonical form.
We identify the boxes of $T=T(n_1, \cdots, n_r)$ and the elements of $\bB$.
With this identification a row of $T$ is a basis for a Jordan cell of $\times z$.
Let $\bB_i=\bB \cap ( (z^{i-1}) \setminus (z^{i}) )$. It is easy to see that
$\bB_i \sqcup \bB_{i+1} \sqcup \cdots \sqcup \bB_{p}$
is a $K$-basis for the ideal $(z^{i-1})$.
With the identification of $\bB$ and the boxes of $T$, the set $\bB_i$ corresponds to
the boxes in the $i$th column of $T$.
Now let $\bB^{\ast}$ be the natural image of $\bB$ in $\Gr(A)$, i.e.,
$\bB^{\ast} = \{b^{\ast} \in \Gr(A) | b \in \bB\}$.
Similarly let $\bB_i^{\ast}=\{b^{\ast} | b \in \bB_i\}$.
One sees immediately that $\bB^{\ast}$ is a basis of $\Gr(A)$ in which
the map $\times z^{\ast}$ is represented by a Jordan canonical form.
It is also immediate to see that $\bB^{\ast}_i$ is a $K$-basis for $(z^{i-1})/(z^{i})$.
Now we would like to prove
\begin{proposition} \label{application}
We use the notation above.
\begin{enumerate}
\item[$(i)$]
The linear maps
$\times z \in \End(A)$ and $\times z^{\ast}\in \End(\Gr_{(z)}(A))$ have the
same Jordan canonical form.
\item[$(ii)$]
Let $y \in A$ be any element. Then $\times y \in \C(\times z)$ and
$\times y^{\ast} \in \C(\times z^{\ast})$.
\item[$(iii)$]
Let $y \in A$ be any linear form.
Let $P$ be the matrix for $\times y$ with the basis $\bB$ and similarly
$Q$ the matrix for $\times y^{\ast}$ with the basis $\bB^{\ast}$.
Then the coarse diagonal blocks of $\widehat{P}$ and those of $\widehat{Q}$ coincide.
\item[$(iv)$] The kernel of the multiplication map $z^{\ast}: \Gr_{(z)}(A) \ra \Gr_{(z)}(A)$ is
given by
$$\bigoplus _{i =1} ^{p}\left((z^{i-1}) \cap (0:z)+ (z^{i})\right)/(z^{i})$$
\end{enumerate}
\end{proposition}
{\em Proof.} $(i)$ Consider the ideal of $\Gr(A)$ generated by a power of $z^{\ast}$.
First note that $(z^{\ast})^{\alpha}= (z^{\alpha})^{\ast}$. Now it is easy to see that
$(z^{\ast})^{\alpha}\Gr_{(z)}(A) \cong (z^{\alpha})/(z^{\alpha +1})\oplus \cdots $, which
implies that ${\rm rank} (\times z)^{\alpha}= {\rm rank} (\times z^{\ast})^{\alpha}$ for all $\alpha = 1,2, 3, \dots$
This shows that they have the same Jordan canonical form.
$(ii)$ Trivial.
$(iii)$
A coarse diagonal block of $\widehat{P}$ is the matrix for the induced map
$\times y \in \End\left((z^{\alpha})/(z^{\alpha +1})\right)$ with the basis
$\bB_{\alpha}^{\ast}$. Thus the assertion follows immediately.
$(iv)$ Left to the reader.
Let $R=K[x_1, x_2, \cdots, x_d]$ be the polynomial ring and let $I \subset R$ be
a homogeneous ideal such that $A=R/I$ is an Artinian $K$-algebra.
Put $Z=x_d$. For any homogeneous element $f \in R$
it is possible to write uniquely
$$f= f_0 + f_1Z + f_2 Z^2 + \cdots + f_k Z^k$$
where $f_i$ is a homogeneous polynomial in $K[x_1, \cdots, x_{d-1}]$.
Let $i$ be the least integer such that $f_i \not = 0$.
In this case we will write ${\rm In}'(f)= f_iZ^{i}$.
Furthermore we define
${\rm In}'(I)$ to be the ideal of $R$ generated by the set
$\{ {\rm In}' (f) \}$,
where $f$ runs over homogeneous elements of $I$.
It is well known that $R/{\rm In}'(I) \cong {\rm Gr}_{(z)}(A)$.
(Here we have set $z= Z\ {\rm mod }\ I$.) Suppose that $d=2$. Then one notices that
${\rm In}'(I)$ coincides with the ideal generated by the initial terms of $I$ with respect to the
reverse lexicographic order with $x_1 > x _2$. The same notation, ${\rm In}'(I), {\rm \ and \ }\Gr_{(z)}(I)$ will be applied for a
graded submodule $I$ of a finite colength in a free $R$-module.
\begin{proposition} \label{rank_of_deformation_2}
Let $K$ be an infinite field, and let
$V$ be a finite dimensional vector space over $K$. Let $J \in \End(V)$ be nilpotent.
Choose a basis of $\, V$ so that we may identify $\End(V)=\bM(n)$, where $n= \dim V$ and
$J$ is put in the Jordan first canonical form.
Let $\C(J) \subset \End(V)$ be the commutator algebra of $J$.
Let $M \in \End(V)=\bM(n)$ be nilpotent such that $ M \in \C(J)$. Let $N_1, \ldots, N_p$ be
the coarse diagonal blocks of $\whM$. Let $M^{\dagger}$ be the matrix such that
$\whM ^{\dagger}={\rm diag}(N_1, \cdots, N_p)$.
Then $M^{\dagger} \in \C(J)$. Moreover we have
$$
\mbox{\rm rank} (M^{\dagger}+ \lambda J) \leq \mbox{\rm rank} (M + \lambda J)
$$
for most $\lambda \in K$.
\end{proposition}
{\em Proof.}
It is easy to see that $M^{\dagger} \in \C(J)$ so we omit the proof. To prove the second assertion
let $R=K[y,z]$ be the polynomial ring in two variables. Define an algebra homomorphism $R \ra \End(V)$
by $y \mapsto M$ and $z \mapsto J$. Then we may regard $V$ as an $R$ module with support in the maximal ideal $(y,z)$.
(Note that $V$ is not necessarily graded.)
Now $M$ is the matrix for the multiplication map $\times y : V \ra V$ and $J$ for $\times z: V \ra V$.
Let $$\Gr_{(z)}(V)= V/zV\oplus zV/ z^2V \oplus \cdots \oplus z^{p-1}V/z^pV$$
The module $\Gr_{(z)}(V)$ has naturally the structure of $R$-module.
One notices that $\times z \in \End(\Gr_{(z)}(V))$
has the same Jordan canonical form as $J$. Moreover the matrix for $\times y \in \End((\Gr_{(z)}(V))$
is $M^{\dagger}$.
Let $g \in R$ be a general linear form.
Now by \cite[Proposition~3.3]{tHjW06} we have
$$\dim V/g V \leq \dim \Gr_{(z)}(V)/g \Gr_{(z)}(V) $$
This proves the assertion as we may assume that $g=y + \lambda z$
for a sufficiently general $\lambda \in K$.
\begin{theorem} \label{rank_of_general_element}
Let $K$ be an infinite field and
let $A=\bigoplus A_i$ be a graded Artinian $K$-algebra and let $z \in A$
be any linear form.
Suppose that the Jordan decomposition of the nilpotent element
$$\times z \in \End(A)$$
is given by
$$
T=T(n_1, \cdots, n_r)=T(\underbrace{f_1, \cdots, f_1}_{m_1}, \underbrace{f_2, \cdots, f_2}_{m_2}, \cdots ,
\underbrace{f_s, \cdots, f_s}_{m_s}).
$$
Let $y \in A$ be a linear form linearly independent of $z$.
Let $J, M \in \End(A)$ be the matrices for
$z, y$ with a basis of $A$ so that
$J$ is in the Jordan second canonical form.
Let $N_1, \cdots, N_r$ be the coarse diagonal blocks
of $\whM \in \C(\whJ)$ $($as defined in
{\rm Definition~\ref{def_diagonal_blocks}}$)$ and
let
$G_1, \cdots, G_s$ be the diagonal blocks of $N_1$.
Then we have:
\begin{enumerate}
\item[$(i)$]
$\mbox{\rm rank }G_1^{f_1} + \mbox{\rm rank }G_2^{f_2} + \cdots + \mbox{\rm rank }G_s^{f_s} + \mbox{\rm rank }J
\leq \rank(M+ \lambda J) $ for most $\lambda \in K$.
\item[$(ii)$]
The equality
\[
\mbox{\rm rank }G_1^{f_1} + \mbox{\rm rank }G_2^{f_2} + \cdots + \mbox{\rm rank }G_s^{f_s} + \mbox{\rm rank }J
={\rm CoSperner}(A),
\]
implies that $y+ \lambda z$ is a weak Lefschetz element of $A$ for most $\lambda \in K$.
\end{enumerate}
\end{theorem}
\def\B{{\bf B}}
{\em Proof.}
$(i)$
Put $G=\Gr_{(z)}(A)$ and let $z^{\ast}, y^{\ast} \in G$ be the initial forms of $z, y$ respectively.
Recall that $\times z \in \End(A)$ and $\times z ^{\ast} \in \End(G)$ have the same Jordan canonical form.
We may choose a basis $\B \subset A$ such that $J:=\times z$ is in Jordan canonical form
as well as $\times z^{\ast}$ with $\B ^{\ast}$.
Let $M^{\dagger}$ be the matrix for $\times y^{\ast}$ with $\B^{\ast}$.
Since $y \not \in (z)$,
the matrix $\whM^{\dagger} \in \C(\whJ)$ consists of only diagonal blocks by
the way the multiplication is defined in $G$.
Moreover they are the same as those of $\whM$.
Thus we have $$ \rank(M^{\dagger} + \lambda J) \leq \rank(M + \lambda J) $$
for most $\lambda \in K$ by Proposition~\ref{rank_of_deformation_2}.
Now the first inequality immediately follows from Proposition~\ref{rank_of_big_matrix}.
$(ii)$ This follows from Proposition~\ref{remarks_on_SLP} $(iii)$.
The following Theorem was proved in \cite[Theorem~1.2]{tHjW06}. The proof is essentially the same as
that of Theorem~\ref{rank_of_general_element}~$(ii)$ above.
\begin{theorem} \label{thm_from_jpaa}
Let $A$ be an Artinian Gorenstein $K$-algebra and $z \in A$ a linear form.
Let $$U_1, \cdots, U_s$$
be the central simple modules defined in $(\ref{definition_of_U})$
for the nilpotent endomorphism $\times z \in \End(A)$. Suppose that
all $U_i$ have the strong Lefschetz property as $A$-modules.
Then $A$ has the strong Lefschetz property.
\end{theorem}
\begin{remark} \label{rem_on_csm}
{\rm
Let $A$ be an Artinian $K$-algebra and $z \in A$ a linear form.
The central simple modules of $(A,z)$ are defined to be the non-zero modules
of the form
$(0:z^f + (z))/ (0:z^{f-1}+(z))$.
They are modules over the algebra $A/(z)$ and are determined by the
Jordan canonical form of $\times z$.
Let $G=\Gr_{(z)}(A)$ be the associated form ring. Then the endomorphisms
$\times z^{\ast} \in \End(G)$
and $\times z \in \End(A)$
have the same Jordan canonical form.
Thus the central simple modules of $(G, z^{\ast})$ can be regarded as the same
modules over $A/(z)$ with the identification $G/(z^{\ast})=A/(z)$.
Suppose that $A$ is Gorenstein. Then, even though $G$ may not be Gorenstein, the strong
Lefschetz property of the central simple modules of $(A,z)$ implies that
$G$ has the strong Lefschetz property.
(See \cite[Theorem~5.2]{tHjW07}.)
}
\end{remark}
\section{Examples}
In the following examples we show how Theorem~\ref{rank_of_general_element} can be used to
compute the rank of a general linear form for $A$ and to prove the weak Lefschetz property of $A$.
We proved in \cite{tHjMuNjW01}
that every Artinian complete intersection in codimension three over a field of characteristic zero has the WLP.
The method used to prove it in Examples~\ref{ex_b3} and \ref{tama_sampia_2003} is different from the one used in
\cite{tHjMuNjW01}.
\begin{example} \label{ex_b3}
Assume that $K$ is a field of {\rm char}$\, K \neq 2$.
Let $R=K[x,y,z], I=(x^2+y^2+z^2, x^4+y^4+z^4, xyz)$, and $A=R/I$.
$($We use the same letter $z$ for the image of $z$ in $A$.$)$
Then $\times z \in \End(A)$ is
represented by the partition
\[
24=\underbrace{5+5+5+5}_{4}+\underbrace{1+1+1+1}_4
\]
The dual partition is
\[
24=8+4+4+4+4
\]
The Young diagram is as follows:
\begin{center}
\begin{equation}
\begin{array}{|ccccc} \hline
{\ } & \svline{\ } & \svline{\ } & \svline{\ } & \svline{\ } \\ \hline
{} & \svline{} & \svline{} & \svline{} & \svline{} \\ \hline
{} & \svline{} & \svline{} & \svline{} & \svline{} \\ \hline
{} & \svline{} & \svline{} & \svline{} & \svline{} \\ \hline
{} & \ssvline{} & & & \\ \cline{1-1}
{} & \ssvline{} & & & \\ \cline{1-1}
{} & \ssvline{} & & & \\ \cline{1-1}
{} & \ssvline{} & & & \\ \cline{1-1}
\end{array}
\hspace{8ex}
\end{equation}
\end{center}
The rank of a general linear form of $A$ is $18$.
$A$ has the strong Lefschetz property, but
$z$ itself is not a strong Lefschetz element, since the rank of $\times z$ is 16.
\end{example}
{\em Proof.}
Since~char$\, K \neq 2$, we have that $A$ is Artinian.
Consider the exact sequence
\[
0 \ra A/(0:z) \ra A \ra A/(z) \ra 0
\]
The first column of the Young diagram corresponds to $A/(z)$, with the
Hilbert function $1+2t+2t^2+2t^3+t^4$. So the dimension is 8.
Put $B=K[x,y,z]/(x^2+y^2+z^2, xy, z^4)$.
Then it is easy to see that
$zA \cong A/(0:z) \cong K[x,y,z]/(x^2+y^2+z^2, xy, x^4+y^4+z^4) \cong B$.
The ideal $zA$ corresponds to the diagram with the first column deleted.
Now it is easy to compute $\dim B/(0:z^i)=4(4-i), \mbox{ for } i=0,1,2,3$.
Thus we have verified the partition for $\times z$ is
$\whT(8,4,4,4,4)=T(5,5,5,5,1,1,1,1)$.
Put $J= \times z$. Then a general member of $\C(J)$ has coarse diagonal blocks
$N_1, \cdots, N_5$ whose sizes are $(8,4,4,4,4)$ respectively and $N_1$ has two diagonal
blocks $G_1$ and $G_2$ of size $4$.
Let $U_1$ and $U_2$ be the central simple modules as defined in Remark~\ref{rem_on_csm} and
let $g$ be a general linear form of $A$.
Let $G_1$ be a matrix for the induced map
$\times g \in \End(U_1)$ and $G_2$ for $\times g \in \End(U_2)$.
Notice that we have the exact sequence
$$0 \ra U_2 \ra A/(z) \ra U_1 \ra 0,$$ where the first map sends 1 to $xy$.
Thus we have
\[
\left\{\begin{array}{l}
U_1 \cong K[x,y]/(x^2+y^2, xy), \\
U_2 \cong K[x,y]/(x^2+y^2, xy)[-2].
\end{array}
\right.
\]
In the notation of Proposition~\ref{rank_of_big_matrix}, $s=2, f_1=5, f_2=1$ and
$\rank G_1 ^{f_1} + \rank G_2 ^{f_2} + \rank J= 0+2+16=18$.
Since the CoSperner number of $A$ is 18, this shows that $A$ has the weak Lefschetz property
by Theorem~\ref{rank_of_general_element}. By direct computation or using \cite{aI94} Theorem~2.9 or
\cite{tHjMuNjW01} Proposition~4.4, it follows that $U_1$ and $U_2$ have the SLP.
Hence by Theorem~\ref{thm_from_jpaa}, $A$ has the SLP.
\begin{example} \label{tama_sampia_2003}
{\rm
Assume that $K$ is an infinite field of $\mbox{char}\ K \neq 2$. Let $R=K[x,y,z]$ and
let $A=R/(x^4+y^4+z^4, xy^3+x^2z^2, y^3z)$. Then, as is easily calculated, $\times z \in \End(A)$ is represented by
$$T=T(7,7,7,7,7,7,3,3,3,3,3,3,1,1,1,1)=\whT(16,12,12,6,6,6,6).$$
Thus using the notation of Proposition~\ref{rank_of_big_matrix}, $f_1=7, f_2=3, f_3=1$.
Put $J=\times z$. Let $M$ be a general member of $\C(J)$.
The first coarse diagonal block $N_1$ of $M$ is of size $16$ and
it consists of fine diagonal blocks $G_1, G_2, G_3$ of sizes
$6,6,4$ respectively. Let $U_1, U_2, U_3$ be the central simple modules defined in Remark~\ref{rem_on_csm}.
Then, as with the previous example, it is not difficult to see that $A$ is Artinian and that
\[
\left\{
\begin{array}{l}
U_1 \cong K[x,y]/(x^2, y^3), \\
U_2 \cong K[x,y]/(x^2, y^3)[-2], \\
U_3 \cong K[y]/(y^4)[-3].
\end{array}
\right.
\]
Let $g \in A$ be a general linear form and let $G_i$ be the matrix for the
induced map $\overline{g} \in \End(U_i)$.
Thus one sees that $G_1$ and $G_2$ are nilpotent matrices with Jordan decomposition $T(4,2)$ and
$G_3$ with $T(4)$.
Thus $$\rank \: G_1 ^{f_1} + \rank \: G_2 ^{f_2} + \rank \: G_3 ^{f_3} + \rank \: J=0+ 1+ 3+ 48=52.$$
This is equal to the CoSperner number of $A$.
By Theorem~\ref{rank_of_general_element}, this shows that $A$ has the weak Lefschetz property.
As in the previous Example, $z$ is not a weak Lefschetz element, but by Theorem~\ref{thm_from_jpaa},
$A$ has the strong Lefschetz property.
\end{example}
\begin{example} \label{four_variable_yokohama}
Let $R=K[w, x, y, z]$ be the polynomial ring, and
put $$A=R/(w^2, wx, x^3, xy, y^3, yz, z^3).$$
\end{example}
(We use the same letters $w,x, \cdots$ to denote their images in $A$.)
The Jordan decomposition of $J:=\times z \in \End(A)$ is represented by
the partition $T=T(3,3,3,3,1,1,1,1)=\whT(8, 4, 4)$.
For a general linear form $g \in A$, the matrix $\times g \in \C(\whJ)$ has three coarse diagonal blocks of
sizes $8,4,4$. The first block $N_1$ has two diagonal blocks of
size four each.
One sees that
\[
\left\{
\begin{array}{l}
U_1 =A/(0:z^2 + (z)) \cong K[w,x,y,z]/(w^2, wx, x^3, y, z), \\
U_2= ((0:z) + (z))/(z) \cong K[w,x,y]/(w^2, x, y^2)[-1].
\end{array}
\right.
\]
(To see this, notice that $((0:z) + (z))/(z)$ is a principal ideal of $A/(z)$ generated by $y$.)
Both $U_1$ and $U_2[1]$ have the Hilbert function $(1,2,1)$.
Let $g \in A$ be a general linear form and let $G_i$ be the matrix for the induced maps
$\times \overline{g} \in \End(U_i)$.
Then $\rank (G_1) ^3 + \rank G_2 + \rank J= 0 + 2 + 8 = 10$, which is equal to the CoSperner number of $A$.
Hence $A$ has the WLP. In fact $A$ has the SLP, but Theorem~\ref{thm_from_jpaa} does not apply since $A$ is not
Gorenstein. However, \cite[Theorem~5.2]{tHjW07} does apply.
Alternatively we may let $w$ play the role of $z$.
The Jordan decomposition for $\times w$ is given by
$T=T(\underbrace{2,2,2,2,2}_5,\underbrace{1,1,1,1,1,1}_{6})=\whT(11,5)$.
We have
\[
\left\{
\begin{array}{l}
U_1 =A/((0:w) + (w)) \cong K[w,x,y,z]/(w, x, y^3, yz, z^3), \\
U_2= ((0:w) + (w))/(w) \cong K[x,y,z] / (x^2, y, z^3)[-1].
\end{array}
\right.
\]
$U_1$ has the Hilbert function $(1,2,2)$, and $U_2$ $(0, 1,2,2,1)$.
(Notice that $(1,2,2,0,0)+(0,1,2,2,1)=(1,3,4,2,1)$ is the
Hilbert function of $A/(z)$.)
Let $g \in A$ be a general linear form and let $G_1$ and $G_2$ be matrices for $\times \overline{g} \in \End(U_1)$
and $\times \overline{g} \in \End(U_2)$
respectively. Then
$\rank (G_1) ^2 + \rank G_2 + \rank (J)= 1+ 4+ 5=10$. This also shows that $A$ has the WLP\@.
Note that we can use neither Theorem~\ref{thm_from_jpaa} nor \cite[Theorem~5.2]{tHjW07} to prove
$A$ has the SLP, since $A$ is not Gorenstein and since $U_1$ does not have a symmetric Hilbert function.
\begin{example} \label{yokohama}
Let $K$ be a field of characteristic 0. Let $R=K[x_1, x_2, \cdots, x_n]$ be the polynomial ring with $n \geq 2$.
Let $$I=(x_1^2, x_1x_2, x_2^3, x_2x_3, x_3^3, \cdots, x_{n-1}x_{n}, x_n^3)$$
Then $R/I$ has the SLP.
\end{example}
To prove this we first consider a similar but simpler example as follows.
\begin{example} \label{simplified_yokohama}
Let $K$ be a field of characteristic 0. Let $n > 1$ be an integer.
\begin{enumerate}
\item[$(i)$]
$K[x_1, x_2, \cdots, x_n]/(x_1^2, x_2^2, \cdots, x_{n-1}^2, x_{n-1}x_{n}, x_n^3)$ has the SLP.
\item[$(ii)$]
$A=K[x_0, x_1, \cdots, x_n]/(x_0 ^{\alpha}, x_1^2, x_2 ^2, \cdots, x_{n-1}^2, x_{n-1}x_{n}, x_n^3 )$ has the WLP for any positive
\linebreak[3] integer $\alpha$.
\end{enumerate}
\end{example}
By \cite[Proposition~18]{tHjW03}, $(i)$ follows from $(ii)$.
To prove $(ii)$ we would like to use Theorem~\ref{rank_of_general_element} so that the same proof works for
Example~\ref{yokohama} also.
Put $z=x_n$. Note that $\dim A = 2^n\alpha$.
Furthermore note that $\dim A/(z)= 2^{n-1}\alpha$ and $\dim (z)/(z^2) = \dim (z^2)/(z^3) = 2^{n-2}\alpha$.
This shows that the Jordan canonical form for $\times z \in \End(A)$ is given by the partition:
\[
T=T(\underbrace{3,3,\cdots ,3}_{2^{n-2}\alpha},
\underbrace{1,1,\cdots , 1}_{2^{n-2}\alpha})=\whT(\underbrace{2^{n-1}\alpha}_{1},\underbrace{2^{n-2}\alpha ,2^{n-2}\alpha }_{2})
\]
\noindent The Young diagram is shown in the picture below:
\begin{equation} \label{new}
\begin{array}{cccl} \\
\cline{1-3}
\svline{\vdots} & \svline{\ \ \vdots \ \ } & \svline{\ \ \vdots \ \ } & \} {2^{n-2}\alpha} \\ \cline{1-3}
\svline{\vdots} & & & \} {2^{n-2}\alpha} \\ \cline{1-1}
\underbrace{}_{1} & \multicolumn{2}{c}{ \underbrace{\ \ \ \ \ \ \ \ }_{2} }
\end{array}
\end{equation}
Now we see that the first coarse diagonal block $N_1$ of the matrix
$\times g \in \C(\widehat{\times z})$, where $g$ is a general linear form, is of size $2^{n-1}\alpha$ and it consists of
two fine diagonal blocks of size $2^{n-2}\alpha$ each.
Note that
$$
\left\{
\begin{array}{l}
U_1:=A/((0:z^2)+(z)) \cong K[x_0, x_1, \cdots, x_{n-2}]/(x_0^{\alpha}, x_1 ^2, \cdots, x_{n-2}^2) \\
U_2:=((0:z)+(z))/(z) \cong K[x_0, x_1, \cdots, x_{n-2}]/(x_0^{\alpha}, x_1 ^2, \cdots, x_{n-2}^2)[-1]
\end{array}
\right.
$$
Since $U_1$ and $U_2$ have the SLP, if $g \in A$ is a general linear form, then
the rank of $\times \overline{g^j} \in \End(U_i)$ can be computed from the Hilbert series of $U_i$.
Now let $G_i$ be a matrix for $\overline{g} \in \End(U_i)$.
Then we have
$\rank (G_1)^3= \dim U_1/(0:g^3)$ and $\rank G_2 = \dim U_2/(0:g)$.
Thus, using Lemma~\ref{generalized_binomial} below,
$$\rank (G_1) ^3 + \rank G_2 + \rank (\times z) = $$
$$(\dim U_1 - s(n-2)-s'(n-2)-s''(n-2)) + (\dim U_2 - s(n-2)) + 2^{n-1}\alpha $$
$$=2^{n}\alpha - s(n) = \mbox{CoSperner number of } A$$
This completes the proof. (The algebra $A$ in fact has the SLP. This can be proved using
\cite[Theorem~5.2]{tHjW07}.)
Verification of the following lemma is left to the reader.
\begin{lemma} \label{generalized_binomial}
Fix a positive integer $\alpha$. Let $A$ be as above. Define the polynomial $h_n(q)$ by
\[
h_n(q)=(q^{\alpha -1}+q^{\alpha -2}+ \cdots + q +1)(q+1)^n
\]
Define the integers $s(n), s'(n), s''(n)$ to be the first three terms of the coefficients of
the polynomial $h_n(q)$ put in the decreasing order.
Then we have
\begin{enumerate}
\item
$h_n(q)$ is the Hilbert series of $A$.
\item
$h_{n-2}(q)$ is the Hilbert series of $U_1$ and $U_2[1]$.
\item
$s(n)$ is the Sperner number of $A$ and $s(n-1)$ is the Sperner number of $U_1$ and $U_2$.
\item
$2^n\alpha -s(n)$ is the CoSperner number of $A$ and $2^{n-1}\alpha -s(n-1)$ is the
CoSperner number of $U_1$ and $U_2$.
\item
$s(n)=s(n-1)+s'(n-1)$, for $n \geq 1$.
\item
$s(n)=2s(n-2)+s'(n-2)+s''(n-2)$, for $n \geq 2$.
\end{enumerate}
\end{lemma}
Now we prove Example~\ref{yokohama}.
As in the previous example it suffices to prove the WLP for
$$A:=K[x_0, x_1, \cdots, x_n]/(x_0 ^{\alpha}, x_1^2, x_1x_2, x_2 ^3, x_2x_3, \cdots,
x_{n-1}x_{n}, x_n^3 )$$ for any positive integer $\alpha$.
Put $A^{(n)}=A$, $B^{(n-1)}=A^{(n-2)}[z]/(z^2)$. Then we have the exact sequence:
\[
0 \ra (z) \ra A^{(n)} \ra A^{(n-1)} \ra 0.
\]
But
\[
(z)[1] \cong A/(0:z) \cong K[x_0, x_1, \cdots, x_n]/
(x_0 ^{\alpha}, x_1^2, x_1x_2, x_2^3, x_2x_3, \cdots, x_{n-1}^3, x_{n-1}, x_n^2) \cong B^{(n-1)}.
\]
Note that $A^{(n)}$ and $B^{(n)}$ have Hilbert series $$(1+q+ \cdots + q^{\alpha -1})(1+q)^n.$$
We are going to induct on $n$ so we assume the SLP for $A^{(n-2)}$ and $B^{(n-2)}$.
Put $z=x_n$. Then one sees easily that $\times z$ has the same Jordan decomposition as
the one treated in Example~\ref{simplified_yokohama}.
Thus the same proof as Example~\ref{simplified_yokohama} works verbatim in this case.
|
{
"timestamp": "2012-06-29T02:01:46",
"yymm": "1206",
"arxiv_id": "1206.6574",
"language": "en",
"url": "https://arxiv.org/abs/1206.6574"
}
|
\section{Introduction}\label{sec:intro}
In many areas of modern science, massive amounts of data are generated.
In the biomedical sciences, examples arise in
genomics, proteomics, and flow cytometry. New high-throughput experiments allow
researchers to look at the dynamics of very rich systems. With these vast increases in data accumulation,
scientists have found classical statistical techniques in need of improvement, and classical notions of error control (type 1 error)
overwhelmed.
Consider the following two class situation: our data consists of $n$
observations, each observation with a known class label of 1 or 2,
with $p$ covariates measured per observation. Let $y$ denote
the $n$-vector corresponding to class (with $n_1$ observations in
class $1$ and $n_2$ in class $2$),
and $X$, the $n\times p$ matrix of covariates. We often assume each row
of $X$ is independently normally distributed with some class specific mean
$\mu_{y(i)}\in\mathbb{R}^p$ and covariance $\Sigma_{y(i)}$ (for
instance in quadratic discriminant analysis). Here, we are interested in differences between classes. A
common example is gene expression data on healthy and diseased
patients: the covariates are the genes ($p\sim 20,000$), the
observations are patients ($n\sim 100$) belonging to either
the healthy or diseased class. Here, one might look at differences
between classes to develop a genetic prognostic test of the disease,
or to better understand its underlying biology.
Recent high dimensional procedures have
focused on detecting differences between
$\mu_1$ and $\mu_2$ by considering them one
covariate at a time.
In this paper we consider the more difficult problem of
testing marginal interactions. In a fashion similar to the approaches used in large scale
testing of main effects (see e.g.\ \citet{DSB2003}, \citet{TTC01} and
\citet{efron2010ebayes}), we do this on a pair by pair basis.
The standard approach for this problem has been to run many bivariate logistic regressions and then conduct a post-hoc analysis on the nominal p-values. \citet{buzkova2011} has a nice summary of the subtle issues that arise in testing
for just a single interaction in a regression framework.
In particular, a permutation approach cannot be simply applied because
it tests the null hypothesis of both no interaction and no main effects at the same time. In the high-dimensional setting with FDR estimates, these issues are compounded.
The logistic regression based methods are all derived from what we call a {\em forward model}, that is, a model
for the conditional distribution of $Y|X$. In contrast, a {\em backward model} (discussed below) is a model
for the conditional distribution of $X|Y$. We propose a method, based on a backwards model, to approach this same problem. By using this backwards framework we avoid many of the pitfalls of standard approaches: we have a less model-based method, we attack a potentially more scientifically interesting quantity, and we can use a permutation null for FDR estimates. Our approach is unfortunately only for binary response --- the backwards model is more difficult to work with for continuous $y$.
In this paper we develop our method, and show its efficacy as compared to straightforward logistic regression on real and simulated data. We explain how to deal with nuisance variables, and give insight into our permutation-based estimates of FDR. We also give some asymptotic consistency results.
\section{Existing Methods}\label{sec:exist}
We begin by going more in-depth on the standard approach and its issues. In general one might like to specify a generative logistic model for the data (a forward model) of the form
\begin{equation}\label{eq:inter}
\operatorname{logit}\left[\operatorname{P}(y_i = 1 | X_{i,\cdot})\right] = \beta_0 + \sum_{j=1}^p \beta_j X_{i,j} + \sum_{k< j}\gamma_{j,k}
X_{i,j} X_{i,k}
\end{equation}
where $X_{i,\cdot}$ is the $i$-th row of $X$, and test if the $\gamma_{j,k}$ are nonzero in this model.
Here $i$ indexes the observations and $j,k$ index the predictors.
However, because it is a joint rather than a marginal model, this does not easily allow us to test individual pairs of covariates
separately from the others. Furthermore in the scenario with $n < p(p+1)/2$, the MLE for this model is not well defined (one can always get perfect separation) and non-MLE estimates are very difficult to use for testing.
Alternatively, for each pair $(X_{i,j}, X_{i,k})$ one might assume a generative logistic model of the form
\begin{equation}\label{eq:bivLog}
\operatorname{logit}\left[\operatorname{P}(y_i = 1 | X_{i,j},
X_{i,k})\right] = \beta_0 + \beta_j X_{i,j} + \beta_k X_{i,k} + \gamma_{j,k}X_{i,j} X_{i,k}
\end{equation}
and estimate or test $\gamma_{j,k}$ using the MLE $\hat\gamma_{j,k}$.
A standard approach to this problem in the past has been to fit pairwise logistic models~\eqref{eq:bivLog} independently for every pair $(j,k)$, and then use standard tools (i.e.\ asymptotic normality of the MLE) to calculate approximate $P$-values. Once the $p(p-1)/2$ $p$-values are calculated, the approach of \citet{BH95} or some other standard procedure can be used to estimate/control FDR.
This approach has a number of problems. First of all, while the approach is very model-based, one cannot even ensure that all of the bivariate logistic models are consistent with one another (i.e. that there is a multivariate model with the given marginals). In particular, model misspecification will often cause over-dispersion resulting in anti-conservative FDR estimates. Also, if the true model contained quadratic terms (which we do not have in our model) then for correlated pairs of features this approach will compensate by trying to add false interactions. Even if we did believe the model, the p-values are only approximate, and this approximation grows worse as we move into the tails.
One might hope to avoid some of these issues by using permutation p-values, however, as shown in \citet{buzkova2011} permutation methods are incongruous with this approach --- they test the joint null hypothesis of no main effect or interaction, which is not the hypothesis of interest. This difficulty is also discussed in \citet{pesarin2001}. In an attempt to resolve this, \citet{kooperberg2008} regress out the main effects before permuting the residuals. This is a nice adjustment, but is still heavily model-based.
To deal with these issues, we take a step back and use a different generative model. Our generative model has an equivalent logistic model and this correspondence allows us to sidestep many of the issues with the standard logistic approach.
\subsection{Forward vs Backward Model}\label{sec:forVsback}
We propose to begin with a ``backward'' generative model
--- as mentioned in Section~\ref{sec:intro}, we assume that observations are Gaussian in each class $\left(x_i|y_i\right) \sim
N(\mu_{y(i)}, \Sigma_{y(i)})$ with a class specific mean and covariance matrix. We argue that the most natural test of interaction is a test of equality of correlations between groups.
Toward this end, let us apply Bayes theorem to our backwards generative model, to obtain
\begin{align*}
\operatorname{P}(y = 1 | x) &= \frac{\pi_1 \operatorname{exp}\left(l_1\right)}{\pi_2 \operatorname{exp}\left(l_2\right) + \pi_1
\operatorname{exp}\left(l_1\right)}\\
&= \frac{ \operatorname{exp}\left[\operatorname{log}(\pi_1/\pi_2) +
l_1 - l_2\right]}{1 + \operatorname{exp}\left[\operatorname{log}(\pi_1/\pi_2) +
l_1 - l_2\right]}
\end{align*}
where
\[
l_m = -p\operatorname{log}\left(2\pi\right)/2 - \operatorname{logdet}\left(\Sigma_m\right)/2 - (x-\mu_m)^{\top}\Sigma_m^{-1}(x-\mu_m)/2
\]
and $\pi_m$ is the overall prevalence of class $m$. We can simplify this to
\begin{align*}
\operatorname{logit} \left(P\right) &= \operatorname{logdet}\left(\Sigma_2\right)/2 - \operatorname{logdet}\left(\Sigma_1\right)/2 + \operatorname{log}(\pi_1/\pi_2) + \mu_2^{\top}\Sigma_2^{-1}\mu_2/2\\
&- \mu_1^{\top}\Sigma_1^{-1}\mu_1/2 + \left(\Sigma_1^{-1}\mu_1 - \Sigma_2^{-1}\mu_2\right)^{\top} x + x^{\top}\left(\Sigma_2^{-1} - \Sigma_1^{-1}\right) x/2.
\end{align*}
This is just a logistic model with interactions and quadratic terms, and in the form of \eqref{eq:inter} (with additional quadratic terms) we have
\begin{align*}
\beta_0 &= \operatorname{logdet}\left(\Sigma_2\right)/2 - \operatorname{logdet}\left(\Sigma_1\right)/2 + \operatorname{log}(\pi_1/\pi_2)\\
&+ \mu_2^{\top}\Sigma_2^{-1}\mu_2/2 - \mu_1^{\top}\Sigma_1^{-1}\mu_1/2\\
\beta_{j} &= \left(\Sigma_1^{-1}\mu_1 - \Sigma_2^{-1}\mu_2\right)_j\\
\gamma_{j,k} &= \left(\Sigma_2^{-1} - \Sigma_1^{-1}\right)_{j,k}.
\end{align*}
From here we can see that traditional logistic regression interactions in the full model correspond to nonzero off-diagonal elements of $\Sigma_2^{-1} - \Sigma_1^{-1}$. Testing for non-zero elements here is not particularly satisfying for a number of reasons. Because coordinate estimates are so intertwined, there is no simple way to marginally test for non-zero elements in $\Sigma_2^{-1} - \Sigma_1^{-1}$ --- in particular there is no straightforward permutation test. Also, for $n<p$ the MLEs for the precision matrices are not well defined.
As in the logistic model~\eqref{eq:bivLog} we may condition on only a pair of covariates $j$ and $k$ in our backwards model. Using Bayes theorem as above, our equivalent bivariate forward model is
\begin{align*}
\operatorname{logit}\left[\operatorname{P}(y = 1 |\, \tilde{x} = \left(x_j, x_k\right)^{\top})\right] &= \operatorname{log}(\pi_1/\pi_2) + \tilde{\mu}_2^{\top}\tilde{\Sigma}_2^{-1}\tilde{\mu}_2/2 -
\tilde{\mu}_1^{\top}\tilde{\Sigma}_1^{-1}\tilde{\mu}_1/2\\
& + \left(\tilde{\Sigma}_1^{-1}\tilde{\mu}_1 - \tilde{\Sigma}_2^{-1}\tilde{\mu}_2\right)^{\top} \tilde{x} + \tilde{x}^{\top}\left(\tilde{\Sigma}_2^{-1} - \tilde{\Sigma}_1^{-1}\right) \tilde{x}/2
\end{align*}
where $\tilde{\mu}_m$ and $\tilde{\Sigma}_m$ are the mean vector and covariance matrix in class $m$ for only $X_j$ and $X_k$. Hence the backwards model has an equivalent logistic model similar to ~\eqref{eq:bivLog} but with quadratic terms included as well. One should note that the main effect and interaction coefficients in this marginal model \emph{do not} match those from the full model (i.e. the marginal interactions and conditional interactions are different).
Our usual marginal logistic interaction between covariates $j$ and $k$ corresponds to a nonzero off-diagonal entry in $\tilde{\Sigma}_2^{-1} - \tilde{\Sigma}_1^{-1}$. Simple algebra gives
\[
\tilde{\Sigma}^{-1}_{m(1,2)} = -\left(\frac{R_{m(j,k)}}{\sigma_{m(j)}\sigma_{m(k)}\left(1-R_{m(j,k)}^2\right)}\right)
\]
where $R_{m(j,k)}$ is the correlation between features $j$ and $k$ in class $m$, and $\sigma_{m(j)}$ is the standard deviation of variable $j$ in class $m$.
Thus, if we were to test for ``logistic interactions'' in our pairwise backwards model, we would be testing:
\[
\frac{R_{1(j,k)}}{\sigma_{1(j)}\sigma_{1(k)}\left(1-R_{1(j,k)}^2\right)} = \frac{R_{2(j,k)}}{\sigma_{2(j)}\sigma_{2(k)}\left(1-R_{2(j,k)}^2\right)}
\]
Now, if $\sigma_{1(j)} = \sigma_{2(j)}$, and $\sigma_{1(k)} = \sigma_{2(k)}$, then this is equivalent to testing if $R_{1(j,k)} = R_{2(j,k)}$. If not, then a number of unsatisfying things may happen. For example if the variance of a single variable changes between classes, then, even if its correlation with other variables remains the same, it still has an ``interaction'' with all variables with which it is correlated. This change of variance is a characteristic of a single variable, and it seems scientifically misleading to call this an ``interaction'' between a pair of features.
Toward this end, we consider a restricted set of null hypotheses --- rather than testing for an interaction between each pair of features $(j,k)$, we test the null $R_{1(j,k)} = R_{2(j,k)}$. Not all logistic interactions will have $R_{1(j,k)} \neq R_{2(j,k)}$, but we believe this is the property which makes an interaction physically/scientifically interesting.
To summarize, there are a number of issues in the forward model which are alleviated through the use of the backwards model:
\begin{itemize}
\item The marginal forward models are not necessarily consistent (one cannot always find a ``full forward model'' with the given marginals).
\item Omitted quadratic terms may be mistaken for interactions between correlated covariates.
\item Interesting interactions are only those for which $R_{1(j,k)} \neq R_{2(j,k)}$.
\item $P$-values are approximate and based on parametric assumptions.
\end{itemize}
\section{Proposal}\label{sec:method}
We begin with the generative model described in
Section~\ref{sec:forVsback}--- we assume observations are Gaussian in each class $\left(x_i|y_i\right) \sim
N(\mu_{y(i)}, \Sigma_{y(i)})$ with a class specific mean and covariance matrix. As argued above, we test for interactions by testing
\[
\mathbf{H}_{j,k}:\,R_{1(j,k)} = R_{2(j,k)}
\]
for each $j<k$, where again, $R_{m(j,k)}$ denotes the $(j,k)$-th entry of the correlation matrix for class $m$.
If we were only testing one pair of covariates $(j,k)$, a
straightforward approach would be to compare the sample correlation coefficients $\hat{R}_{1(j,k)}$ to
$\hat{R}_{2(j,k)}$. In general, because the variance of $\hat{R}_{m(j,k)}$ is dependent on
$R_{m(j,k)}$, it is better to make inference on a Fisher
transformed version of $\hat{R}_{m(j,k)}$:
\[
U_{m(j,k)} = \operatorname{arctanh}\left(\hat{R}_{m(j,k)}\right)
\dot{\sim}
N\left(\operatorname{arctanh}\left(R_{m(j,k)}\right),\frac{1}{n_m-3}\right)
\]
This is a variance stabilizing transformation. Now, to compare the two
correlations we consider the statistic
\begin{equation}\label{eq:stat}
T_{(j,k)} = U_{1(j,k)} - U_{2(j,k)} \dot{\sim}
N\left(\operatorname{arctanh}\left(R_{1(j,k)}\right) - \operatorname{arctanh}\left(R_{2(j,k)}\right),\frac{1}{n_1-3} + \frac{1}{n_2-3}\right)
\end{equation}
Under the null hypothesis: $R_{1(j,k)} = R_{2(j,k)}$, this statistic is
distributed $N\left(0,\frac{1}{n_1-3} + \frac{1}{n_2-3}\right)$. To test if the
correlations are equal we need only compare our statistic $T_{(j,k)}$
to its null distribution and find a $p$-value. While this approach works well for single tests, because we are in the high dimensional setting we use a different approach which doesn't rely on the statistic's asymptotic normal distribution.
We are interested in testing differences between two large correlation matrices in higher dimensional spaces. We again calculate the differences of our transformed sample correlations
--- we now calculate $p(p-1)/2$ statistics; one for each pair $(j,k)$
with $j<k$. However to assess significance we no longer just compare each statistic
to the theoretical null distribution and find a p-value. Instead we directly estimate false discovery rates (FDR): we choose some
threshold for our statistics, $t$, and reject (/call significant)
all $(j,k)$ with $|T_{(j,k)}| > t$. Clearly, not all marginal interactions called significant
in this way will be truly non-null and it is important to estimate the FDR
of the procedure for this cutoff, that is
\[
\operatorname{FDR} = E\left[\frac{\textrm{\# false rejections}}{\textrm{\# total rejections}}\right],
\]
where `\#' is short-hand for ``number of''. It is standard to approximate this quantity by
\begin{equation}\label{eq:FDR}
\frac{\hat{E}[\textrm{\# false rejections}]}{\textrm{\# total rejections}}.
\end{equation}
The denominator is just the number of $|T_{(j,k)}| > t$ (which we know). If we knew
which hypotheses were null and their distributions then one could find
the numerator by
\begin{equation}\label{eq:numer}
E[\textrm{\# false rejections}] = \sum_{(j,k) \textrm{ null}} \operatorname{P}(|T_{(j,k)}| > t)
\end{equation}
Clearly we don't know which hypotheses are null. To estimate
\eqref{eq:numer} we propose the following permutation approach.
We first center and scale our variables within class: for each observation we subtract off the class mean for each feature and divide by that feature's within-class standard deviation --- let $\tilde{X}$ denote this standardized matrix. This standardization doesn't change our original statistics, $T_{(j,k)}$ (the
correlations calculated from $X$ and from $\tilde{X}$ are identical), but
is important for our null distribution. Now, let $\pi$
be some random permutation of $\{1,\ldots,n\}$. Thus, $\pi(y)$ is a
random permutation of the class memberships of the standardized variables (we keep the standardization from before the permutation). With these new class
labels we calculate a new set of $p(p-1)/2$ statistics,
$\{T^{*a}_{(j,k)}\}_{j<k}$. We can permute our data $A$ times, and
gather a large collection of these null statistics ($Ap(p-1)/2$ of them). To estimate
$E[\textrm{\# false rejections}]$, we take the average number
of these statistics that lie above our cutoff
\[
\hat{E}[\textrm{\# false rejections}] = \frac{1}{A} \sum_{a=1}^A \#
\left\{(j,k) : |T^{*a}_{(j,k)}| > t\right\}
\]
Often, one is interested in the FDR of the $l$ most significant
interactions. In this case the cutoff, $t$, is chosen to be the
absolute value of the $l$-th most significant statistic, denoted
$T(l)$. We refer to this procedure as Testing Marginal Interactions through correlation (TMIcor) and summarize it below.
\medskip
\begin{center}
{\bf TMIcor: Algorithm for Testing Marginal Interactions}\label{alg:1}
\end{center}
\begin{enumerate}
\item Mean center and scale $X$ within each group.
\item Calculate the feature correlation matrices $\hat R_1$ and $\hat R_2$ within each class.
\item Fisher transform the entries (for $j<k$): $U_{m(j,k)} = \operatorname{arctanh}\left(\hat{R}_{m(j,k)}\right)$\\
and take their coordinate-wise differences: $T_{(j,k)} = U_{1(j,k)} - U_{2(j,k)}$
\item For $a=1,\ldots,A$ execute the following
\begin{enumerate}
\item Randomly permute class labels of the standardized variables.
\item Using the new class labels, reapply steps 2--3 to calculate new
statistics $\{T^{*a}_{(j,k)}\}_{j<k}$
\end{enumerate}
\item Estimate FDR for any $l$ most significant interactions by
\[
\widehat{\mathrm{FDR}} = \frac{\left(\frac{1}{A}\right) \sum_{a=1}^A \#\left\{(j,k) : |T^{*a}_{(j,k)}| > T(l)\right\}}{l}
\]
\end{enumerate}
Using this approach, one gets a ranking of pairs of features and an FDR estimate for every position in the ranking. Furthermore, rather than testing for interactions between all pairs
of variables, one may instead test for interactions between variables in one set
(such as genes) and variables in another (such as environmental variables). To do this, one need only restrict the statistics considered in steps $3$, $4b$ and $5$.
Standardizing in step $(1)$ before permuting may seem strange, but in this case is necessary. If we do not standardize first, we are testing the joint null that the means, variances and correlations are the same between classes. This is precisely what we moved to the backward model to avoid --- by standardizing we avoid permuting the ``main effects''. We discuss this permutation-based estimate of FDR in more depth in appendix A.
\section{Comparisons}\label{sec:comparisons}
In this section we apply TMIcor and the standard logistic approach to real and simulated data. On simulated data we see that in some scenarios (in particular with main effects) the usual approach has serious power issues as compared to TMIcor. Similarly on our real dataset we see that the usual approach does a poor job of finding interesting interactions, while TMIcor does well.
\subsection{Simulated Data}
We attempt to simulate a simplified version of biological data. In general, groups of proteins or genes act in concert based on biological processes. We model this with a block diagonal correlation matrix --- each block of proteins/genes is equi-correlated. This can be interpreted as a latent factor model --- all the proteins in a single block are highly correlated with the same latent variable (maybe some unmeasured cytokine), and conditional on this latent variable, the proteins are all uncorrelated. In our simulations we use $10$ blocks, each with $10$ proteins ($100$ total proteins). We simulate the proteins for our healthy controls as jointly Gaussian with $0$ mean and covariance matrix
\[
\Sigma_1 = \begin{pmatrix}
R_1 & 0 & \cdots & 0\\
0 & R_2 & \cdots & 0\\
\vdots & \vdots & \ddots & \vdots\\
0 & \cdots & 0 & R_{10}
\end{pmatrix}
\]
where each $R_i$ is a $10\times 10$ matrix with $1$s along the diagonal, and a fixed $\rho_i>0$ for all off-diagonal entries. Now, for our diseased patients we again use mean $0$ proteins, but change our covariance matrix to
\[
\Sigma_2 = \begin{pmatrix}
\tilde{R}_1 & 0 & \cdots & 0\\
0 & R_2 & \cdots & 0\\
\vdots & \vdots & \ddots & \vdots\\
0 & \cdots & 0 & R_{10}
\end{pmatrix}
\]
where $\tilde{R}_1$ has $1$s on the diagonal and $\tilde{\rho}_1$ for all off-diagonal entries (with $0\leq \tilde{\rho}_1 \neq \rho_1$). This correlation structure would be indicative of a mutation in the cytokine for the first group causing a change in the association between that signaling protein and the rest of the group.
Within each class (diseased and healthy) we simulated $250$ patients and applied TMIcor and the usual logistic approach. We averaged the true and estimated false discovery rates of these methods over $10$ trials. As we can see from Figure~\ref{fig:1} TMIcor outperforms the logistic approach. This difference is particularly pronounced in the second plot of Figure~\ref{fig:1}. In this plot, because the correlations are large but different in both groups ($\rho_1 = 0.3$, $\tilde{\rho}_1 = 0.6$), there are some moderate quadratic effects in the true model --- this induces a bias in the logistic approach and its FDR suffers. In contrast, these quadratic effects are not problematic in the backward framework.
\begin{figure}[t!]
\centerline{
\mbox{\includegraphics[width=2.85in]{cor2.pdf}}
\mbox{\includegraphics[width=2.85in]{cor1.pdf}}
}
\caption{Plots of estimated and true FDR for TMIcor and logistic regression averaged over $10$ trials. Error bars contain the mean value $\pm$ 1 se of the mean. For controls, $\rho_i = 0.3$ for all $i$. On the left $\tilde{\rho}_1 = 0$, while on the right $\tilde{\rho}_1 = 0.6$. There is no main effect in either panel.}
\label{fig:1}
\end{figure}
We also consider a second set of simulations. This set used $\rho_i = 0.3$ for all $i$ and $\tilde{\rho}_1 = 0$. However, instead of mean $0$ in both classes, we set the mean for all proteins in block 1 for diseased patients to be some $\tilde{\mu}_1$ ($> 0$). The results are plotted in Figure~\ref{fig:2}. This mean shift had no effect on TMIcor (the procedure is mean-shift invariant), but as the mean difference grows, it becomes increasingly difficult for the logistic regression to find any interactions. This issue is especially important as, biologically, one might expect genes with main effects to be more likely to have true marginal interactions (and these interactions may also be more scientifically interesting).
\begin{figure}[t!]
\centerline{
\mbox{\includegraphics[width=2.85in]{mean1.pdf}}
\mbox{\includegraphics[width=2.85in]{mean2.pdf}}
}
\caption{Plots of estimated and true FDR for TMIcor and logistic regression averaged over $10$ trials. Error bars contain the mean value $\pm$ 1 se of the mean. For both plots $\tilde{\rho}_1 = 0$ and $\rho_i = 0.3$ for all $i$. Both panels have main effects --- on the left $\tilde{\mu}_1 - \mu_1=0.5$, while on the right $\tilde{\mu}_1 - \mu_1 = 1$.}
\label{fig:2}
\end{figure}
While these simulations are not exhaustive, they give an indication of a number of scenarios in which TMIcor significantly outperforms logistic regression. More exhaustive simulations were run and the results mirrored those in this section.
\subsection{Real Data}
We also applied both TMIcor and logistic regression to the colitis gene expression data of \citet{burczynski2006}. In this dataset, there are $127$ total patients, $85$ with colitis ($59$ Crohn's patients + $26$ ulcerative colitis patients) and $42$ healthy controls. We restricted our analysis to the $101$ patients without ulcerative colitis. Each patient had expression data for $22283$ genes run on an Affymetrix U133A microarray. Because chromosomes $5$ and $10$ have been implicated in Crohn's disease, we enriched our dataset by using only the genes on these chromosomes, along with the $NOD2$ and $ATG16L1$ genes (chromosomes as specified by the $C1$ geneset from \citet{subramanian2005}). In total $663$ genes were used. Some of these genes were measured by multiple probesets --- the final expression values used for those genes were the average of all probesets.
From these $663$ genes we have $219{,}453$ interactions to consider. Figure~\ref{fig:fdr} shows the estimated FDR curves for the two methods. TMIcor finds many more significant interactions --- at an FDR cutoff of $0.1$, TMIcor finds $2570$ significant interactions, while the logistic approach finds $15$. The significant $15$ from the logistic approach may not even be entirely believable --- the smallest $p$-value of the $15$ is roughly $1/219453$, which is what we would expect it to be if all null hypotheses were true. Because the smallest $p$-value is large, we see that the FDR for logistic regression begins surprisingly high. The FDR subsequently drops because there are a number of $p$-values near the smallest, however, the significance of these hypotheses is still suspect.
\begin{figure}[!t]
\centerline{
\includegraphics[width=3in]{FDRlarge.pdf}
}
\caption{Crohn's data; FDR estimates for TMIcor and logistic approaches for the $5000$ most significant marginal interactions}
\label{fig:fdr}
\end{figure}
Unfortunately, interpreting $2570$ marginal interactions is difficult (even if all are true). Toward this end we consider the graphical representation of our analysis in Figure~\ref{fig:graphBig}. Each gene is a node in our graph, and edges between genes signify marginal interactions. In this plot we considered only the $1250$ of the $2570$ significant marginal interactions indicative of a decrease in correlation from healthy control to Crohn's (i.e.\ $T_{(j,k)} > 0$). There is one large connected component, a few connected pairs and a large number of isolated genes. The connected component appears to be split into $2$ clusters. To get a better handle on this, we considered a more stringent cutoff for significant interactions --- at an FDR cutoff of $0.03$, we are left with $832$ significant interactions of which only $402$ have $T_{(j,k)} > 0$. We plot this graph in Figure~\ref{fig:graphSmall}: we see that our large connected component has divided into $2$. From here we further zoomed in on each component (now displaying only the $50$ most significant interactions per component), and can actually see which genes are most important (in Figure~\ref{fig:graphComp}).
\begin{figure}[!t]
\centerline{
\includegraphics[width=3in]{graphBig.pdf}
}
\caption{Graph of $1250$ marginal interactions (with decreasing correlation) significant at FDR cutoff of $0.1$. Genes with no significant interactions not shown}
\label{fig:graphBig}
\end{figure}
\begin{figure}[!t]
\centerline{
\includegraphics[width=3in]{graphSmall.pdf}
}
\caption{Graph of $402$ marginal interactions (with decreasing correlation) significant at FDR cutoff of $0.03$. Genes with no significant interactions not shown}
\label{fig:graphSmall}
\end{figure}
\begin{figure}[t!]
\centerline{
\mbox{\includegraphics[width=2.85in]{graphComp1.pdf}}
\mbox{\includegraphics[width=2.85in]{graphComp2.pdf}}
}
\caption{Graphs of the top $50$ marginal interactions in each cluster (and corresponding genes)}
\label{fig:graphComp}
\end{figure}
It appears, from this analysis, that there are two genetic pathways which are modified in Crohn's disease. Many of the genes in each cluster are already known to be implicated in Crohn's, but to our knowledge these interactions have not been considered.
\section{Dealing with Nuisance Variables}\label{sec:nuis}
Often, aside from the variables of interest, one may believe that other nuisance variables play a role in complex interactions. For example, it seems reasonable that many genes are conditionally independent given age, but are each highly correlated with age. Ignoring age, these genes would appear to be highly correlated, but this correlation is uninteresting to us. TMIcor can be adapted to deal with these nuisance variables provided there are few compared to the number of observations, they are continuous, and they are observed.
We resolve this issue by using partial correlations. Assume $x_j$ and $x_k$ are our variables of interest, and $z$ is a vector of potential confounders. Rather than comparing $\operatorname{cor}\left(x_j, x_k\right)$ in groups $1$ and $2$, we compare the partial correlations, $\operatorname{cor}\left(\left[x_j|z\right], \left[x_k|z\right]\right)$. This is done by first regressing our potential confounders, $Z$, out of all the other features, then running the remainder of the analysis as usual.
To adapt the original algorithm in Section~\ref{sec:method} to deal with nuisance variables we need only replace step $(1)$ by:
\begin{enumerate}
\item Replace our feature matrices $X_1$ and $X_2$ by
\[
\tilde{X}_m = \left[I - Z_m\left(Z_m^{\top}Z_m\right)^{-1}Z_m^{\top}\right]X_m
\]
Now, mean center and scale $\tilde{X}$ within each group.
\end{enumerate}
We give more details motivating this approach and discussing potential computational advantages in appendix B.
\section{Asymptotics}\label{sec:asymptotics}
In this section we give two asymptotic results. We show that if $n\rightarrow \infty$, and $\frac{\log p_n}{n} \rightarrow 0$, then under certain regularity conditions our procedure for testing marginal interactions (in the absence of nuisance variables) is asymptotically consistent --- with probability approaching $1$ it calls significant all true marginal interactions and makes no false rejections. Furthermore, using the permutation null, it also consistently estimates that the true FDR is converging to $0$. Because we only need $\frac{\log p_n}{n} \rightarrow 0$, $p_n$ may increase very rapidly in $n$.
We first give a result showing that for sub-Gaussian variables our null statistics converge to $0$ and our alternative statistics are asymptotically bounded away from $0$. The proof of this theorem is based on several technical lemmas which we relegate to appendix C.
\begin{theorem}\label{thm:con}
Let $\tilde{x}_{1(j)}$ and $\tilde{x}_{2(j)}$, $j=1,\ldots$ be random variables. Assume there is some $C>0$ such that for all $t\geq 0$
\[
\operatorname{P}\left(\left|\tilde{x}_{m(j)} - \operatorname{E}[\tilde{x}_{m(j)}]\right| > t\right) \leq \operatorname{exp}\left(1-t^2/C^2\right)
\]
for each $m=1,2$. Let $\mu_{m(j)}$ denote the mean of $\tilde{x}_{m(j)}$ and $\sigma_{m(j)}^2$ its variance. For each $i \geq 1$, let $x_{m(i,\cdot)}$ be independent realizations with the same distribution as $\tilde{x}_{m(\cdot)}$.
Let $p_n$ be a sequence of integers such that $\frac{\log p_n}{n} \rightarrow 0$. Let $R_{m}$ be the correlation ``matrix'' (an infinite but countably indexed matrix) of the covariates from group $m$. Let $I$ denote the set of ordered pairs $(j,k)$ for which $R_{1(j,k)} \neq R_{2(j,k)}$, and $C_n$ denote the set of ordered pairs $(j,k)$ with $j,k\leq p_n$.
Assume for every $m$ and $j$, $\sigma_{m(j)}^2 \geq \sigma_{\min}^2$ (for some $\sigma_{\min}^2 > 0$). Furthermore, assume that for all $(j,k)$ in $I$, $\left|R_{1(j,k)} - R_{2(j,k)}\right| > \Delta_{\min}$ for some $\Delta_{\min}>0$ and that for $m=1,2$, $\operatorname{sup}_{j<k}\left|R_{m(j,k)}\right| < \rho_{\max}$ for some fixed $\rho_{\max} < 1$.\\
Now, given any $\epsilon_p >0$, and $0 < t < \Delta_{\min}$, if we choose $n$ sufficiently large, then with probability at least $1 - \epsilon_p$
\[
\left|T_{(j,k)}\right| \leq t
\]
for all $(j,k)$ in $C_n - I$, and
\[
\left|T_{(j,k)}\right| \geq t
\]
for all $(j,k)$ in $C_n\cap I$.
\end{theorem}
The notation here is a little bit tricky, but the result is very straightforward: under some simple conditions, we find all marginal interactions and make no false identifications.
While there were a number of assumptions in the above theorem, most of these are fairly trivial and will almost always be found in practice: the variance must be bounded away from $0$ and the correlations bounded away from $\pm 1$. The assumption that the correlation differences are bounded below by a fixed $\Delta_{\min}$ for true marginal interactions is a bit more cumbersome, but may easily be relaxed to $\Delta_{\min} \rightarrow 0$ at a slow enough rate that $\Delta_{\min} /\left[\log p /n\right]^{1/2} \rightarrow \infty$.
The astute reader might note that our assumption bounding the variance away from $0$ seems strange --- the distribution of the sample correlation is independent of the variance. This is necessary only because we assumed the covariates have a subgaussian tail with a shared constant $C$. One could have relaxed the bounded variance assumption to the assumption that $\left\{x_j/\sigma_j\right\}_{j=1,\ldots}$ have a sub-Gaussian tail with a shared constant $C$.
\subsection{Permutation Consistency}
Now that we have shown our procedure has FDR converging to $0$, we would like to show that it asymptotically estimates FDR consistently as well. In particular we show that as $n\rightarrow \infty$, if $\frac{\log p}{n}\rightarrow 0$, then with probability approaching $1$, for a random permutation, our permuted statistics converge to $0$ uniformly in probability ($\max_{j,k}\left|T_{(j,k)}^*\right| \leq t$ for any fixed $t>0$ with probability converging to $1$). Thus our estimated FDR converges to $0$ under the same conditions as our true FDR.
We begin with some notation. Let us consider an arbitrary permutation of class labels, $\Pi$. Let $\hat{\pi}$ denote the proportion of observations from class $1$ that remain in class $1$ after permuting.
We discuss a somewhat simplified procedure in our proof, as otherwise the algebra becomes significantly more painful (without any added value in clarity), but it is straightforward to carry the proof through to the full procedure. In our original procedure, after permuting class labels we recenter and rescale our variables within each class. Because we already centered and scaled variables before permuting, this step will have very little effect on our procedure (though it does have the nice effect of never giving $|\rho^{*}| > 1$). In this proof we consider a procedure identical in every way except without recentering and rescaling within each permutation.
Before we give the theorem, we would like to define a few new terms for clarity. For a given permutation $\Pi$, let $\Pi_i(m)\in\left\{1,2\right\}$ be the permuted class of the $i$-th observation originally in class $m$. Furthermore, let $\Pi\left(m,l\right)$ be the set of observations in class $m$ that
are permuted to class $l$, and let $\Pi\left(\cdot,l\right)$ be the set of observations in both classes permuted to class $l$, ie.
\begin{align*}
\Pi\left(m,l\right) &= \left\{i:\,\Pi_i(m) = l\right\}\\
\Pi\left(\cdot,l\right) &= \left\{(i,m):\,\Pi_i(m) = l\right\}
\end{align*}
Now, we give a result which shows that for any fixed $t>0$ if our variables are sub-Gaussian with some other minor conditions, then for $n\rightarrow \infty$ and $\log p/n\rightarrow 0$ with probability approaching $1$, none of our permuted statistics will be larger than $t$; in other words, just as our true FDR converges to $0$, so will our estimated FDR. As before, the proof of this theorem is based on several technical lemmas which we again leave to appendix C.
\begin{theorem}\label{thm:perm}
Let $\tilde{x}_{1(j)}$ and $\tilde{x}_{2(j)}$, $j=1,\ldots$ be random variables with
\[
\operatorname{P}\left(\left|\tilde{x}_{m(j)} - \operatorname{E}\left[\tilde{x}_{m(j)}\right]\right|\geq t\right) \leq \operatorname{exp}\left(1-t^2/C^2\right)
\]
for all $t>0$, and each $m=1,2$, with some fixed $C>0$. Let $\mu_{m(j)}$ denote the mean of $\tilde{x}_{m(j)}$ and $\sigma_{m(j)}^2$ its variance. For each $i \geq 1$, let $x_{m(i,\cdot)}$ be independent realizations with the same distribution as $\tilde{x}_{m(\cdot)}$.
Let $p_n$ be a sequence of integers such that $\frac{\log p_n}{n} \rightarrow 0$. Let $R_{m}$ be the correlation ``matrix'' (an infinite but countably indexed matrix) of the covariates from class $m$.
Assume for every $m,\,j$, $\sigma_{m(j)}^2 \geq \sigma_{\min}^2$ (for some $\sigma_{\min}^2 > 0$). Furthermore, assume that for $m=1,2$, $\operatorname{sup}_{j<k}\left|R_{m(j,k)}\right| < \rho_{\max}$ for some fixed $\rho_{\max} < 1$.
Now, given any $\epsilon_p >0$, and $0 < t$, if we choose $n$ sufficiently large and let $\Pi$ be a random permutation, then with probability at least $1 - \epsilon_p$
\[
\left|T_{(j,k)}^*\right| \leq t
\]
for all $(j,k)$ with $j,k \leq p_n$ where
\[
T_{(j,k)}^* = \operatorname{arctanh}\left(\hat{R}_{\textrm{perm}:1(j,k)}\right) - \operatorname{arctanh}\left(\hat{R}_{\textrm{perm}:2(j,k)}\right)
\]
and
\[
\hat{R}_{\textrm{perm}:m(j,k)} = \frac{1}{n}\sum_{(i,l)\in\Pi(\cdot,m)}\left(\frac{x_{l(i,j)} - \hat{\mu}_{l(j)}}{\hat{\sigma}_{l(j)}}\right)\left(\frac{x_{l(i,k)} - \hat{\mu}_{l(k)}}{\hat{\sigma}_{l(k)}}\right)
\]
\end{theorem}
The notation is again somewhat ugly, but the result is very straightforward: under some simple conditions, our permuted statistics are very small. In particular from the proof one can see that $\operatorname{sup}\left\{T_{(j,k)}^*\right\} = O_p\left(\sqrt{\log p_n/n}\right)$.
Note there is an implicit indexing of $n$ in $\hat{R}_{\textrm{perm}:m(j,k)}$ (it seemed unnecessary to add more indices). As in theorem~\ref{thm:con}, some of our conditions may be relaxed. Instead of bounding $\sigma_j^2$ below, we need only bound $C\sigma_j$ below. Also, rather than choose a fixed cutoff, $t>0$, we may use any sequence $\left\{t_n\right\}$ with $t_n/\left(\log p_n/n\right)^{1/2} \rightarrow \infty$. Also, as noted before, the result we have just shown ignores the restandardizing within each permutation, however it is straightforward (though algebraically arduous, and not insightful) to extend this result to that case as well.
As a last note, in theorem~\ref{thm:perm}, we gave our consistency result for only a single permutation. This result can easily be extended to any fixed number of permutations using a union bound. This was left out of the original statement/proof as the notation is already clunky and the extension is straightforward.
Through theorems \ref{thm:con} and \ref{thm:perm} we have shown that, under fairly relaxed conditions, our procedure is asymptotically consistent at discovering marginal interactions and that the permutation null reflects this.
\section{Discussion}
In this paper we have discussed marginal interactions for logistic regression in the framework of forward and backward models. We have developed a permutation based method, TMIcor, which leverages the backward model. We have shown its efficacy on real and simulated data and given asymptotic results showing its consistency and convergence rate. We also plan to release a publically available {\tt R} implementation.
\section{Appendix A}\label{sec:perm}
In this section we give more details on our permutation-based estimate of FDR, and discuss a potential alternative. Recall that we are using the permutations to approximate
\begin{equation}\label{eq:numer-app}
\sum_{(j,k) \textrm{ null}} \operatorname{P}(|T_{(j,k)}| > t).
\end{equation}
For the moment, assume that all covariates in both classes have mean $0$
and variance $1$, and that we did not do any sample standardization. Then, under the null hypothesis that $R_{1(j,k)} =
R_{2(j,k)}$, $T_{(j,k)}$ calculated under the original class
assignments and $T^*_{(j,k)}$ calculated under any permuted class
assignments have the same distribution, so
\[
\sum_{(j,k) \textrm{ null}} \operatorname{P}(|T_{(j,k)}| > t) = \sum_{(j,k) \textrm{ null}} \operatorname{P}(|T^*_{(j,k)}| > t)
\]
which is reasonably (and unbiasedly) approximated by
\[
\sum_{(j,k) \textrm{ null}} \frac{1}{A}\sum_{a=1}^AI(|T^{*a}_{(j,k)}| > t).
\]
Because we do not know which genes are null, our actual estimate of
\eqref{eq:numer} is
\begin{align}\label{eq:bias}
\sum_{(j,k)} \frac{1}{A}\sum_{a=1}^AI(|T^{*a}_{(j,k)}| > t) &=\sum_{(j,k) \textrm{ null}} \frac{1}{A}\sum_{a=1}^A I(|T^{*a}_{(j,k)}| >
t)\\
&+ \sum_{(j,k) \textrm{ alternative}} \frac{1}{A}\sum_{a=1}^A I(|T^{*a}_{(j,k)}| > t)
\end{align}
This gives a slight conservative bias (especially small if most marginal interactions are null). One should also note that
unlike the null statistics, for the alternative $(j,k)$, $T^*_{(j,k)}$
are not distributed $N\left(0,\frac{2}{n-3}\right)$; they are still
mean $0$, but the variance is increased. However, this conservative bias is very slight --- in general there are few alternative hypotheses, and the variance increase is not large.
Because in practice we do not have mean $0$, variance $1$ for all
covariates in both classes, we must standardize before running our
procedure. Otherwise, instead of testing for a changing correlation,
we are actually testing for a different mean, variance, or correlation
between classes. The effect of standardizing with the sample mean and variance rather than the true values is asymptotically washed out, and while the variance of our tests is increased for small samples, this increase is only minimal.
An alternative to permutations, as discussed in \citet{efron2010ebayes}, is to directly
estimate the numerator using the approximate theoretical distribution of the null
statistics. Each null statistic is asymptotically
$N\left(0,\frac{1}{n_1-3} + \frac{1}{n_2-3}\right)$, so for $(j,k)$ null
\[
\operatorname{P}(|T_{(j,k)}| > t) = 2\Phi\left(-t\sqrt{\frac{(n_1 -3)(n_2-3)}{n_1 + n_2 - 6}}\right).
\]
Now we can conservatively approximate the quantity in Eq~\eqref{eq:numer} by
\begin{align*}
\sum_{(j,k) \textrm{ null}}P\left(|T_{(j,k)}| > t\right) &\leq p(p-1)/2 \cdot P\left(|T_{\textrm{null}}| > t\right)\\
&= p(p-1)\cdot\Phi\left(-t\sqrt{\frac{(n_1 -3)(n_2-3)}{n_1 + n_2 - 6}}\right)
\end{align*}
While this approach is reasonable and simple, it is less robust than using permutations, and in practice, even for truly Gaussian data, it is only slightly more efficient.
\section{Appendix B}
Before proceeding, we remind the reader that $x$ are our variables of interest and $z$ are potential confounding variables. Furthermore we are interested in comparing $\operatorname{cor}\left(\left[x_j|z\right], \left[x_k|z\right]\right)$ between groups. From basic properties of the Gaussian distribution we know that
\[
x|z\sim N\left[\mu_x + \Sigma_{(x,z)}\Sigma_{z}^{-1}\left(z - \mu_z\right), \Sigma_{(x|z)}\right]
\]
where $\Sigma_{(x|z)}$ is the variance/covariance matrix of $x$ given $z$, $\Sigma_{(x,z)}$ is the covariance matrix between $x$ and $z$, $\Sigma_z$ is the variance matrix of $z$, and $\mu_x$ and $\mu_z$ are the means of $x$ and $z$. Now, if $\mu_x,\,\mu_z,\,\Sigma_{(x,z)},$ and $\Sigma_{z}$ were known, then the MLE for $\Sigma_{(x|z)}$ would be
\[
\hat{\Sigma}_{(x|z)} = \frac{1}{n} \left[X - 1\mu_x^{\top} - \left(Z - 1\mu_z^{\top}\right)\Sigma_{z}^{-1}\Sigma_{(z,x)}\right]^{\top}\left[X - 1\mu_x^{\top} - \left(Z - 1\mu_z^{\top}\right)\Sigma_{z}^{-1}\Sigma_{(z,x)} \right].
\]
Unfortunately, these nuisance parameters are unknown. However we can also estimate them by maximum likelihood. This gives us the estimate
\begin{align*}
\hat{\Sigma}_{(X|Z)} &= \frac{1}{n}\left[\tilde{X} - \tilde{Z}\left(\tilde{Z}^{\top}\tilde{Z}\right)^{-1}\tilde{Z}^{\top}\tilde{X}\right]^{\top}\left[\tilde{X} - \tilde{Z}\left(\tilde{Z}^{\top}\tilde{Z}\right)^{-1}\tilde{Z}^{\top}\tilde{X}\right]\\
&=\frac{1}{n}\left[\operatorname{P}_{\tilde{Z}\perp}\left(\tilde{X}\right)\right]^{\top}\left[\operatorname{P}_{\tilde{Z}\perp}\left(\tilde{X}\right)\right]
\end{align*}
where $\tilde{Z}$ is the standardized version of $Z$, and $\tilde{X}$ is the standardized version of $X$, and $\operatorname{P}_{\tilde{Z}\perp}$ is the projection onto the orthogonal complement of the column space of $\tilde{Z}$. So, our estimate of partial correlation is just an estimate of correlation with $Z$ regressed out of both covariates. We use this to construct our permutation null. In the original algorithm, we mean centered and scaled before permuting; here we do the equivalent --- we project our variables of interest onto the orthogonal complement of our nuisance variables, and then center/scale them. Now we are ready to permute. We permute these ``residuals'', and calculate permuted correlations as before.
Before proceeding, we note that for sufficiently large $n$ ($n \gg p$) one might use a similar approach to consider partial correlations rather than marginal correlations in our original algorithm (conditioning out all covariates except any particular $2$). However, in general $n \ll p$ and thus $\operatorname{P}_{\perp} \equiv 0$ rendering this approach ineffective --- this approach only works for nuisance variables because we assume that there are very few relative to the number of observations.
As stated in the text, to adapt the original algorithm to deal with nuisance variables we need only replace step $(1)$ by:
\begin{enumerate}
\item Replace our feature matrices $X_1$ and $X_2$ by
\[
\tilde{X}_m = \left[I - Z_m\left(Z_m^{\top}Z_m\right)^{-1}Z_m^{\top}\right]X_m
\]
Now, mean center and scale $\tilde{X}$ within each group.
\end{enumerate}
One may note that we only calculate $\tilde{X}$ once per class, at the beginning of our procedure, not in each permutation. We do this for a similar reason that we standardize our variables before permuting --- because we are not testing the hypothesis that the relationship between $X$ and $Z$ is the same in both groups. If we recalculate after each permutation then we are implicitly assuming that this relationship is the same in both groups under the null.
Even with nuisance variables this approach is very computationally fast. Projecting our original variables onto $Z\perp$ can be done in $O\left(npp_{\textrm{nuis}}\right)$ operations where $p_{\textrm{nuis}}$ is the number of nuisance variables. Thus the total runtime of this algorithm is $O\left(npp_{\textrm{nuis}} + Anp(p-1)/2\right)$ where $A$ is the number of permutations --- this is dominated by the second term, which is independent of the number of nuisance parameters. In contrast, if we were to use the standard approach (fitting pairwise logistic regressions with nuisance variables), its runtime would be $O\left[\left(iter\right)(3+p_{\textrm{nuis}})^2np(p-1)/2\right]$ where $iter$ is the number of iterations of the algorithm for finding the MLE. In general $A \sim 100$ and $iter \sim 5$. Now, since $(3+p_{\textrm{nuis}})^2$ grows very quickly in $p_{\textrm{nuis}}$, for even a small number of nuisance parameters the logistic approach becomes much slower.
\section{Appendix C}
This appendix contains the technical details from the theorems in section~$7$ of the main manuscript. We begin with a number of technical lemmas:
First, as one might imagine, if we can consistently estimate our correlation matrices, applying a Fisher transformation should not change much. We formalize this with the next lemma.
\begin{lemma}\label{lemma3}
Let $R_1$, $R_2$ be correlation matrices, and $\hat{R}_1$, $\hat{R}_2$ be estimates of $R_1$ and $R_2$.
Let $I$ be the set of ordered pairs $(j,k)$ where $R_{1(j,k)} \neq R_{2(j,k)}$. Assume for all $(j,k)$ in $I$, $\left|R_{1(j,k)} - R_{2(j,k)}\right| > \Delta_{\min}$ for some $\Delta_{\min} > 0$ and that for $m=1,2$ we have $\operatorname{sup}_{j<k}\left|R_{m(j,k)}\right| < \rho_{\max}$ for some fixed $\rho_{\max} < 1$.\\
Further assume that for $m=1,2$, $\left\|R_m - \hat{R}_m\right\|_{\infty} \leq \delta$ (for some $\delta < 1-\rho_{\max}$). Then for all $(j,k)$ in $I^{c}$ with $j \neq k$ we have
\begin{equation}\label{eq:close}
\left|\operatorname{arctanh}\left(\hat{R}_{1(j,k)}\right) - \operatorname{arctanh}\left(\hat{R}_{2(j,k)}\right)\right| \leq \frac{2\delta}{1-\left(\rho_{\max} + \delta\right)^2}
\end{equation}
and for all $(j,k)$ in $I$ with $j \neq k$ we have
\begin{equation}\label{eq:far}
\left|\operatorname{arctanh}\left(\hat{R}_{1(j,k)}\right) - \operatorname{arctanh}\left(\hat{R}_{2(j,k)}\right)\right| \geq \Delta_{\min} - 2\delta
\end{equation}
\end{lemma}
One immediate consequence of this lemma is that as $\delta \rightarrow 0$, for $(j,k)$ in $I^{c}$ our statistics $T_{(j,k)}$ converge to $0$ (at rate $O(\delta)$), and for $(j,k)$ in $I$, $T_{(j,k)}$ are bounded away from $0$ (at a rate of at least $O(\delta)$).
\begin{proof}[{\bf Proof of Lemma~\ref{lemma3}}]
We begin by showing that for all $(j,k)$ in $I^{c}$ with $j \neq k$ we have
\[
\left|\operatorname{arctanh}\left(\hat{R}_{1(j,k)}\right) - \operatorname{arctanh}\left(\hat{R}_{2(j,k)}\right)\right| \leq \frac{2\delta}{1-\left(\rho_{\max} + \delta\right)^2}
\]
The mean value theorem gives us that
\[
\left|\operatorname{arctanh}\left(\hat{R}_{1(j,k)}\right) - \operatorname{arctanh}\left(\hat{R}_{2(j,k)}\right)\right| \leq \operatorname{sup}_{r}\left|\frac{1}{1-r^2}\right|\left|\hat{R}_{1(j,k)} - \hat{R}_{2(j,k)}\right|
\]
where the supremum is taken over $r$ in $\left[\hat{R}_{1(j,k)},\, \hat{R}_{2(j,k)}\right]$. Note that for $m=1,2$, we have $|\hat{R}_{m(j,k)}| < \rho_{\max} + \delta$, and $\left|\hat{R}_{1(j,k)} - \hat{R}_{2(j,k)}\right| \leq 2\delta$, for $(j,k)$ not in $I$. Thus,
\[
\operatorname{sup}_{r}\left|\frac{1}{1-r^2}\right|\left|\hat{R}_{1(j,k)} - \hat{R}_{2(j,k)}\right| \leq \frac{2\delta}{1-\left(\rho_{\max} + \delta\right)^2}.
\]
Now for $(j,k)$ in $I$, we again use the mean value theorem:
\[
\left|\operatorname{arctanh}\left(\hat{R}_{1(j,k)}\right) - \operatorname{arctanh}\left(\hat{R}_{2(j,k)}\right)\right| \geq \operatorname{inf}_{r}\left|\frac{1}{1-r^2}\right|\left|\hat{R}_{1(j,k)} - \hat{R}_{2(j,k)}\right|
\]
and our result follows because $\left|\hat{R}_{1(j,k)} - \hat{R}_{2(j,k)}\right| \geq \Delta_{\min} - 2\delta$.
\end{proof}
Now we consider convergence of these sample correlation matrices. We show that their convergence depends only on the convergence of the sample means ($\hat{\mu}_j$), variances ($\hat{\sigma}_j^2$), and pairwise inner products. We formalize this in the following lemma.
\begin{lemma}\label{lemma1}
Let $\tilde{x}_j$, $j=1,\ldots$ be random variables. Let $\mu_j$ denote the mean of $\tilde{x}_j$ and $\sigma_j^2$ its variance. Let $R_{j,k}$ be the correlation between $\tilde{x}_j$ and $\tilde{x}_k$. For each $i$, let $x_{i,\cdot}$ be independent realizations with the same distribution as $\tilde{x}_{\cdot}$ (eg. $x_{i,j}$ has the marginal distribution of $\tilde{x}_j$).
For any given $\epsilon > 0$, there exists $\delta > 0$ such that if
\begin{equation}\label{eq:bnd}
\operatorname{sup} \left\{\left|\hat{\sigma}_j - \sigma_j\right|,\, \left|\hat{\mu}_j - \mu_j\right|,\,\left|\frac{(1/n)\sum_{i\leq n}x_{i,j}x_{i,k}}{\sigma_j\sigma_k} - \frac{\mu_j\mu_k}{\sigma_j\sigma_k} - R_{j,k}\right|\right\}_{j,k} \leq \delta
\end{equation}
then
\begin{equation}\label{eq:bnd0}
\operatorname{sup}_{j<k \leq p} \left|\hat{R}_{j,k} - R_{j,k}\right| \leq \epsilon
\end{equation}
Furthermore, one can choose $\delta = O(\epsilon)$
\end{lemma}
\begin{proof}[{\bf Proof of Lemma~\ref{lemma1}}]
We begin by noting that the distribution of $\hat{R}_{j,k}$ is independent of $\mu_j$, $\mu_k$, $\sigma_j$ and $\sigma_k$. For ease of notation we assume $\mu_j = \mu_k = 0$ and $\sigma_j = \sigma_k = 1$.\\
To see that \eqref{eq:bnd} is sufficient for \eqref{eq:bnd0} we write $\hat{R}_{j,k} - R_{j,k}$ as
\begin{align*}
\left|\hat{R}_{j,k} - R_{j,k}\right| &= \left|\frac{\left(1/n\right)\sum_{i=1}^n x_{i,j}x_{i,k}}{\hat{\sigma}_j\hat{\sigma}_k} - \frac{\hat{\mu}_j\hat{\mu}_k}{\hat{\sigma}_j\hat{\sigma}_k} - R_{j,k}\right|\\
&\leq \left|\frac{1}{n}\sum_{i=1}^n x_{i,j}x_{i,k}\right|\left|\left(\frac{1}{\hat{\sigma}_j\hat{\sigma}_k} - 1\right)\right|\\
&+ \left|\frac{1}{n}\sum_{i=1}^n x_{i,j}x_{i,k} - R_{j,k}\right| + \left|\frac{\hat{\mu}_j\hat{\mu}_k}{\hat{\sigma}_j\hat{\sigma}_k}\right|
\end{align*}
We first note that $\left|\frac{1}{n}\sum_{i=1}^n x_{i,j}x_{i,k} - R_{j,k}\right| < \delta$. Thus we need only consider $\left|\frac{\hat{\mu}_j\hat{\mu}_k}{\hat{\sigma}_j\hat{\sigma}_k}\right|$ and $\left|\left(\frac{1}{\hat{\sigma}_j\hat{\sigma}_k} - 1\right)\right|$. Expanding these terms using the fact that $1/(1-\delta) = 1 + O(\delta)$, it is straightforward to see that the whole expression converges to $0$ at rate $O(\delta)$. This completes our proof.
\end{proof}
Now that we have reduced convergence to that of the sample mean, variance, and inner products, we show particular circumstances under which our estimation is consistent, and give rates of convergence.
\begin{lemma}\label{lemma2}
Let $\tilde{x}_j$, $j=1,\ldots$ be random variables. Assume there is some $C>0$ such that for all $t\geq 0$
\[
\operatorname{P}\left(\left|x_j - \operatorname{E}[x_j]\right| > t\right) \leq \operatorname{exp}\left(1-t^2/C^2\right)
\]
(These are known as sub-Gaussian random variables). Let $\mu_j$ denote the mean of $\tilde{x}_j$ and $\sigma_j^2$ its variance. Let $R_{j,k}$ be the correlation between $\tilde{x}_j$ and $\tilde{x}_k$. For each $i$, let $x_{i,\cdot}$ be independent realizations with the same distribution as $\tilde{x}$.
Let $\delta,\, \epsilon_p > 0$ be given. Then for $n$ sufficiently large and $\frac{\log p}{n}$ sufficiently small we have that
\begin{equation}\label{eq:lem2}
\operatorname{sup} \left\{\left|\hat{\sigma}_j - \sigma_j\right|,\, \left|\hat{\mu}_j - \mu_j\right|,\,\left|\frac{(1/n)\sum_{i\leq n}x_{i,j}x_{i,k}}{\sigma_j\sigma_k} - \frac{\mu_j\mu_k}{\sigma_j\sigma_k} - R_{j,k}\right|\right\}_{j,k\leq p} \leq \delta
\end{equation}
with probability greater than $1-\epsilon_p$. In particular one can choose $\delta = O\left(\log p / n\right)^{1/2}$.
\end{lemma}
The class of sub-Gaussian random variables is rather broad, containing Gaussian random variables and all bounded random variables. Applying this lemma, we are able to show consistency for the wide class of variables with sufficiently light tails.
In the proof of this lemma we get a convergence rate of $\delta = O\left(\log p / n\right)^{1/2}$. This rate agrees with the literature for other similar problems in covariance estimation (\citet{bickel2008} among others).
\begin{proof}[{\bf Proof of Lemma~\ref{lemma2}}]
We will begin by bounding $\left|\hat{\mu}_j - \mu_j\right|$. If we consider Lemma~$5.10$ of \citet{vershynin2010} we see that
\[
\operatorname{P}\left(\left|\hat{\mu}_j - \mu_j\right| > t\right) \leq e\cdot \operatorname{exp}\left[-\left(\tilde{C}t^2\right)n\right]
\]
where $\tilde{C}$ is some function of $C$ (one can prove this Hoeffding type inequality by an exponential Markov argument). Applying the union bound to this we see that
\[
\operatorname{P}\left(\operatorname{sup}_{j\leq p}\left|\hat{\mu}_j - \mu_j\right| > t\right) \leq 3 p \operatorname{exp}\left[-\left(\tilde{C}t^2\right)n\right]
\]
If we set $t = \left(\sqrt{1/C}\right)\sqrt{\frac{q + \log p}{n}}$ then we have
\[
\operatorname{P}\left(\operatorname{sup}_{j\leq p}\left|\hat{\mu}_j - \mu_j\right| > t\right) \leq e^{1-q},
\]
bounding $\left|\hat{\mu}_j - \mu_j\right|$.\\
Next we bound $\left|\hat{\sigma}_j - \sigma_j\right|$. We first note that
\[
\left|\hat{\sigma}_j - \sigma_j\right| = \frac{\left|\hat{\sigma}_j^2 - \sigma_j^2\right|}{\hat{\sigma}_j + \sigma_j} \leq \frac{\left|\hat{\sigma}_j^2 - \sigma_j^2\right|}{\sigma_j}
\]
because $\hat{\sigma}_j, \sigma_j > 0$, so we need only consider convergence of $\hat{\sigma}_j^2 - \sigma_j^2$. Next note that
\[
\frac{1}{n}\sum_i \left(x_{i,j} - \bar{x}_j\right)^2 - \frac{1}{n}\sum_i \left(x_{i,j} - \mu_j\right)^2 = -\left(\bar{x}_j - \mu_j\right)^2
\]
So now if we can bound $\left|\frac{1}{n}\sum_i \left(x_{i,j} - \mu_j\right)^2 - \sigma_j^2\right|$ and $\left(\bar{x}_j - \mu_j\right)^2$, then we can bound $|\hat{\sigma}_j^2 - \sigma_j^2|$.\\
To bound $\left|\frac{1}{n}\sum_i \left(x_{i,j} - \mu_j\right)^2 - \sigma_j^2\right|$, we first note that if $x_{i,j}$ is sub-Gaussian then $(x_{i,j} - \mu_j)^2$ is subexponential; ie
\[
\operatorname{P}\left(\left(x_{i,j} - \mu_j\right)^2 - \sigma_j^2 > t\right) \leq \operatorname{exp}\left(-C_1 t\right)
\]
for some fixed $C_1$. Now we apply Corollary~$5.17$ of \citet{vershynin2010}, and get that for any $t$ sufficiently small (independent of $n$)
\[
\operatorname{P}\left(\left|\frac{1}{n}\sum_i\left(x_{i,j} - \mu_j\right)^2 - \sigma_j^2\right| > t\right) \leq 2\operatorname{exp}\left(-\tilde{C}_1 t^2 n\right)
\]
for some fixed $\tilde{C}_1$. Bounding $\left(\bar{x}_j - \mu_j\right)^2$ is also quite straightforward (we just use the bound for $\left|\bar{x}_j - \mu_j\right|$)
\[
P\left(\left(\bar{x}_j - \mu_j\right)^2 \geq t\right) \leq e \operatorname{exp}\left[-\left(\tilde{C}t\right)n\right]
\]
We note that for $t<1$, $t^2 < t$. Let $\bar{C} = \min\{\tilde{C}_1,\tilde{C}\}$. Now, combining these inequalities with the triangle inequality we have
\begin{align*}
P\left(\left|\hat{\sigma}_j^2 - \sigma_j^2\right| \geq t\right) &\leq e\operatorname{exp}\left[-\left(\tilde{C}t\right)n\right] + 2\operatorname{exp}\left(-\tilde{C}_1 t^2 n\right)\\
& \leq 5\operatorname{exp}\left[-\bar{C}t^2n\right]
\end{align*}
for $t$ sufficiently small. Now finally,
\[
P\left(\left|\hat{\sigma}_j - \sigma_j\right| \geq t\right) \leq P\left(\left|\hat{\sigma}_j^2 - \sigma_j^2\right| \geq t\sigma_{\min}\right) \leq 5\operatorname{exp}\left[-\bar{C}\sigma_{\min}^2t^2n\right].
\]
Using the union bound again, we get
\[
P\left(\operatorname{sup}_{j}\left|\hat{\sigma}_j^2 - \sigma_j^2\right| \geq t\right) \leq 5p\operatorname{exp}\left[-\bar{C}t^2n\right].
\]
so
\[
P\left(\operatorname{sup}_{j}\left|\hat{\sigma}_j - \sigma_j\right| \geq t\right) \leq 5p\operatorname{exp}\left[-\bar{C}\sigma_{\min}^2t^2n\right].
\]
Finally, we need to bound $\left|\frac{(1/n)\sum_{i\leq n}x_{i,j}x_{i,k}}{\sigma_j\sigma_k} - \frac{\mu_j\mu_k}{\sigma_j\sigma_k} - \rho_{j,k}\right|$. This is slightly trickier but still not terrible. We first note that
\[
(1/n)\sum_{i\leq n}x_{i,j}x_{i,k} - \mu_j\mu_k = (1/n)\sum_{i\leq n}\left(x_{i,j} - \mu_j\right)\left(x_{i,k}-\mu_k\right)
\]
We also see that
\begin{align*}
2\sum_{i\leq n}\left(x_{i,j} - \mu_j\right)\left(x_{i,k}-\mu_k\right) &= \sum_{i\leq n}\left[\left (x_{i,j} - \mu_j\right) + \left(x_{i,k}-\mu_k\right)\right]^2\\
& - \sum_{i\leq n}\left (x_{i,j} - \mu_j\right)^2 - \sum_{i\leq n}\left (x_{i,k} - \mu_k\right)^2
\end{align*}
Now to bound the above quantity we consider the moment generating function of $x_{i,j} - \mu_j + x_{i,k}-\mu_k$. This is not necessarily a sum of independent random variables; still, by the Cauchy--Schwarz inequality we have
\begin{align*}
&\operatorname{E}\left[\operatorname{exp}\left[t\left(x_{i,j} - \mu_j + x_{i,k}-\mu_k\right)\right]\right]\\
&\leq \operatorname{max}\left\{\operatorname{E}\left[\operatorname{exp}\left[2t\left(x_{i,j} - \mu_j\right)\right]\right],\operatorname{E}\left[\operatorname{exp}\left[2t\left(x_{i,k} - \mu_k\right)\right]\right]\right\}
\end{align*}
It is a well known fact that sub-Gaussian random variables can be characterized by their MGF (shown in \citet{vershynin2010}), and this is still the moment generating function of a sub-Gaussian random variable. Thus, $\left(x_{i,j} - \mu_j + x_{i,k}-\mu_k\right)^2$ is sub-exponential, and again by Corollary~$5.17$ of \citet{vershynin2010} we have that
\begin{align*}
&\operatorname{P}\left(\left|\frac{1}{n}\sum_{i}\left(x_{i,j} - \mu_j + x_{i,k}-\mu_k\right)^2 - \sigma_j^2 - \sigma_k^2 - 2\sigma_j\sigma_k\rho_{j,k}\right| > t\right)\\
&\leq 2\operatorname{exp}\left[-C_2t^2n\right].
\end{align*}
for $t>0$ sufficiently small and some fixed $C_2 > 0$. Now, stringing all of these together with the triangle inequality we have that
\begin{align*}
&\operatorname{P}\left(\left|\frac{2}{n}\sum_{i\leq n}\left(x_{i,j} - \mu_j\right)\left(x_{i,k}-\mu_k\right) - 2\rho_{j,k}\sigma_j\sigma_k\right| > 3t\right)\\
&\leq \operatorname{P}\left(\left|\frac{1}{n}\sum_{i\leq n}\left(x_{i,j} - \mu_j + x_{i,k}-\mu_k\right)^2 - \sigma_j^2 - \sigma_k^2 - 2\sigma_j\sigma_k\rho_{j,k}\right| > t\right)\\
&+ \operatorname{P}\left(\left|\frac{1}{n}\sum_{i\leq n}\left (x_{i,j} - \mu_j\right)^2 - \sigma_j^2\right| > t\right) + \operatorname{P}\left(\left|\frac{1}{n}\sum_{i\leq n}\left (x_{i,k} - \mu_k\right)^2 - \sigma_k^2\right| > t\right)\\
&\leq 2\operatorname{exp}\left[-C_2t^2n\right] + 2*5\operatorname{exp}\left[-\bar{C}t^2n\right]\\
&\leq 12\operatorname{exp}\left[-\bar{C}_1t^2n\right]
\end{align*}
for all $t>0$ sufficiently small with some fixed $\bar{C}_1>0$. Taking this a step further, and applying the union bound, we see that
\[
P\left(\operatorname{sup}_{j,k}\left|\frac{(1/n)\sum_{i\leq n}x_{i,j}x_{i,k}}{\sigma_j\sigma_k} - \frac{\mu_j\mu_k}{\sigma_j\sigma_k} - \rho_{j,k}\right| > t\right) \leq 12p^2 \operatorname{exp}\left[-\bar{C}_2t^2n\right]
\]
for some fixed $\bar{C}_2$.\\
Now that we have bounded each term, we see that \eqref{eq:lem2} fails with probability at most
\begin{align*}
&12p^2 \operatorname{exp}\left[-\bar{C}_2\delta^2n\right] + 2*5p\operatorname{exp}\left[-\bar{C}\sigma_{\min}^2\delta^2n\right] + 2*3p\operatorname{exp}\left[-\tilde{C}\delta^2n\right]\\
&\leq 28p^2\operatorname{exp}\left[-\mathbf{C}\delta^2 n\right]
\end{align*}
for $\delta$ sufficiently small where $\mathbf{C} = \min\left\{\bar{C}\sigma_{\min}^2,\bar{C}_2,\tilde{C}\right\}$. Thus, if $\delta = \left(\frac{q + 2\log p}{\mathbf{C}n}\right)^{1/2}$ then we have \eqref{eq:lem2} with probability at least $1-28e^{-q}$. If $n$ is sufficiently large, and $\frac{\log p}{n}$ sufficiently small, then for any $q$, $\delta$ can be made arbitrarily small.
\end{proof}
Now, we combine these lemmas to show that under certain conditions, for a given cutoff $t$, as $n\rightarrow\infty$ if $\log p/n\rightarrow 0$ then, with probability approaching $1$, all true marginal interactions have $|T_{i,j}| > t$, and all null statistics will have $|T_{i,j}| < t$ (ie. we asymptotically find all true interactions and make no false rejections).
Before we begin, it deserves mention that we use slightly different notation than in the discussion of our algorithm in Section~$3$. Rather than having $X_{i,\cdot}$ denote the $i$-th observation overall, and letting $y(i)$ denote its group (where $i$ ranged from $1$ to the total number of observations in both groups), we split up our observations by group, letting $x_{m(i,\cdot)}$ denote the $i$-th observation from group $m$ (now $i$ ranges from $1$ to the total number of observations in group $m$). This change simplifies notation in the statement of the theorem and its proof. We also assume equal group sizes ($n_1 = n_2 = n$), this again simplifies notation but can be relaxed to $n_1/(n_1 + n_2) \rightarrow \alpha \in (0,1)$.
\begin{proof}[{\bf Proof of Theorem~6.1}]
This result is a straightforward corollary of our $3$ lemmas:
First choose an arbitrary $\epsilon_p > 0$, and $0 < t < \Delta_{\min}$. If we consider Lemma~\ref{lemma3}, we see that the conclusion of our theorem holds if we can find a bound on the sup-norm distance between each correlation matrix and its MLE (a bound we will call $\delta_1$) which satisfies
\[
\frac{2\delta_1}{1-\left(\rho_{\max} + \delta_1\right)^2} \leq t \leq \Delta_{\min} - 2\delta_1.
\]
Because $\rho_{\max} < 1$, $\delta_1 > 0$ sufficiently small will satisfy this.\\
Now applying Lemma~\ref{lemma1}: if we choose $\delta_2$ sufficiently small (but still of $O(\delta_1)$), then if
\begin{equation}\label{thm:bound1}
\operatorname{sup} \left\{\left|\hat{\sigma}_j - \sigma_j\right|,\, \left|\hat{\mu}_j - \mu_j\right|,\,\left|\frac{(1/n)\sum_{i\leq n}x_{i,j}x_{i,k}}{\sigma_j\sigma_k} - \frac{\mu_j\mu_k}{\sigma_j\sigma_k} - \rho_{j,k}\right|\right\}_{j,k} \leq \delta_2
\end{equation}
we have that the sup norm distance between each correlation matrix and its MLE is bounded by $\delta_1$: for $m=1,2$
\[
\left\| \hat{R}_m - R_m \right\|_{\infty}\leq \delta_1
\]
Finally, by Lemma~\ref{lemma2}, we see that if $n$ is sufficiently large and $\log p / n$ is sufficiently small then \eqref{thm:bound1} holds with probability at least $1-\epsilon_p$. This finishes our proof.
\end{proof}
\subsection{Proofs of Permutation Results}
To begin, we prove a Lemma which does most of the leg-work for our eventual theorem. It says that for a reasonably balanced permutation, for $n$ sufficiently large and $\log p/n$ sufficiently small, both of our permuted sample correlation matrices will be very close to the average of the $2$ population correlation matrices.
\begin{lemma}\label{lemma:perm}
Let $\tilde{x}_{1(j)}$ and $\tilde{x}_{2(j)}$, $j=1,\ldots$ be random variables with
\[
\operatorname{P}\left(\left|x_{m(j)} - \operatorname{E}\left[x_{m(j)}\right]\right|\geq t\right) \leq \operatorname{exp}\left(1-t^2/C^2\right)
\]
for all $t>0$, and each $m=1,2$, with some fixed $C>0$. Let $\mu_{m(j)}$ denote the mean of $\tilde{x}_{m(j)}$ and $\sigma_{m(j)}^2$ its variance. For each $i < \infty$, let $x_{m(i,\cdot)}$ be independent realizations with the same distribution as $\tilde{x}_{m(\cdot)}$.
Let $p_n$ be a sequence of integers such that $\frac{\log p_n}{n} \rightarrow 0$. Let $R_{m}$ be the correlation ``matrix'' (an infinite but countably indexed matrix) of the covariates from class $m$. Define $R_{\operatorname{perm}}$ to be the average of the two,
\[
R_{\textrm{perm}} = \frac{1}{2} R_1 + \frac{1}{2}R_2
\]
Let $\hat{\mu}_{m(j)}$ and $\hat{\sigma}_{m(j)}^2$ be the pre-permuted estimates of the mean and variance (in each class):
\[
\hat{\mu}_{m(j)} = \frac{1}{n}\sum_{i \leq n} x_{m(i,j)}
\]
and
\[
\hat{\sigma}_{m(j)}^2 = \frac{1}{n}\sum_{i \leq n} \left(x_{m(i,j)} - \hat{\mu}_{m(j)}\right)^2.
\]
Further, define
\[
\hat{R}_{\textrm{perm}:m(j,k)} = \frac{1}{n}\sum_{(i,l)\in\Pi(\cdot,m)}\left(\frac{x_{m(i,j)} - \hat{\mu}_{m(j)}}{\hat{\sigma}_{m(j)}}\right)\left(\frac{x_{m(i,k)} - \hat{\mu}_{m(k)}}{\hat{\sigma}_{m(k)}}\right)
\]
our permuted correlation between covariates $j$ and $k$ in class $m$.
Assume for every $j$, $\sigma_j^2 \geq \sigma_{\min}^2 > 0$. Now for any $\epsilon >0$, $\delta>0$, one can find $n$ sufficiently large such that for any permutation $\Pi$ with
\[
\left|\hat{\pi} - \frac{1}{2}\right|\leq \frac{\delta}{12}
\]
(where $\hat{\pi}$ is the proportion of class $1$ that remains fixed under $\Pi$), we have
\begin{equation}\label{perm:bnd}
\left\|R_{\textrm{perm}} - \hat{R}_{\textrm{perm}:m}\right\|_{\infty} \leq \delta
\end{equation}
for both $m=1,2$ with probability at least $1-\epsilon$.
\end{lemma}
\begin{proof}[{\bf Proof of Lemma~\ref{lemma:perm}}]
We first consider only $m=1$. If we can show that
\[
\left\|R_{\textrm{perm}} - \hat{R}_{\textrm{perm}:m}\right\|_{\infty} \leq \delta
\]
with high probability for $m=1$, then by symmetry we have it for $m=2$, and by a simple union bound we have it for both simultaneously.\\
Now, we begin by decomposing our sample permuted correlation matrix
\begin{align*}
\hat{R}_{\textrm{perm}:1} &= \frac{1}{n}\sum_{(i,m)\in\Pi(\cdot,1)}\left(\frac{x_{m(i,j)} - \hat{\mu}_{m(j)}}{\hat{\sigma}_{m(j)}}\right)\left(\frac{x_{m(i,k)} - \hat{\mu}_{m(k)}}{\hat{\sigma}_{m(k)}}\right)\\
&= \hat{\pi}\hat{R}_{\textrm{perm}:1}^{(1)} + \left(1 - \hat{\pi}\right)\hat{R}_{\textrm{perm}:1}^{(2)}
\end{align*}
where $\hat{R}_{\textrm{perm}:1}^{(l)}$ is a matrix defined by
\begin{equation}\label{eq:contrib}
\hat{R}_{\textrm{perm}:1(j,k)}^{(l)} = \frac{1}{\tilde{n}_l}\sum_{i\in\Pi(l,1)}\left(\frac{x_{l(i,j)} - \hat{\mu}_{1(j)}}{\hat{\sigma}_{1(j)}}\right)\left(\frac{x_{l(i,k)} - \hat{\mu}_{1(k)}}{\hat{\sigma}_{1(k)}}\right)
\end{equation}
where $\tilde{n}_l$ is the number of elements from group $l$ permuted to group $1$ (i.e. the cardinality of $\Pi(l,1)$ or more explicitly $\tilde{n}_1 = \hat{\pi}n$ and $\tilde{n}_2 = (1-\hat{\pi})n$). The quantity \eqref{eq:contrib} is just the contribution from observations originally in class $l$ to the permuted correlation matrix for class $1$. Thus by the triangle inequality
\begin{align} \label{eq:permTriangle}
\left\|R_{\textrm{perm}} - \hat{R}_{\textrm{perm}:1}\right\|_{\infty} &\leq \left\|\frac{1}{2} R_1 - \hat{\pi}\hat{R}_{\textrm{perm}:1}^{(1)}\right\|_{\infty} + \left\|\frac{1}{2} R_2 - \left(1 - \hat{\pi}\right)\hat{R}_{\textrm{perm}:1}^{(2)}\right\|_{\infty}\\
&\leq \frac{1}{2} \left\|R_1 - \hat{R}_{\textrm{perm}:1}^{(1)}\right\|_{\infty} + \frac{1}{2}\left\|R_2 - \hat{R}_{\textrm{perm}:1}^{(2)}\right\|_{\infty}\notag\\
&+ \left|\hat{\pi} - \frac{1}{2}\right|\left(\left\|\hat{R}_{\textrm{perm}:1}^{(1)}\right\|_{\infty} + \left\|\hat{R}_{\textrm{perm}:1}^{(2)}\right\|_{\infty}\right)\notag
\end{align}
If we consider $\hat{R}_{\textrm{perm}:1}^{(1)}$, we see that it is essentially a sample correlation matrix (using only the $\hat{\pi}n$ observations that were fixed in class $1$ by $\Pi$ for the inner product). We can make a similar observation for $\hat{R}_{\textrm{perm}:1}^{(2)}$. Now, for $n$ sufficiently large, because $|\frac{1}{2} - \hat{\pi}|$ is small, we can make $\hat{\pi}n$ and $\left(1-\hat{\pi}\right)n$ as large as we would like. Thus, by a combination of Lemma~\ref{lemma1} and Lemma~\ref{lemma2}, we have that
\[
\left\|R_l - \hat{R}_{\textrm{perm}:1}^{(l)}\right\|_{\infty} < \delta/3
\]
with probability greater than $1-\epsilon/3$. Furthermore, using the same Lemmas we get
\[
\left\|\hat{R}_{\textrm{perm}:1}^{(1)}\right\|_{\infty} + \left\|\hat{R}_{\textrm{perm}:1}^{(2)}\right\|_{\infty} \leq 4
\]
with probability at least $1-\epsilon/3$ (this bound can easily be made tighter, and if we were to standardize within permutation this bound is trivial). Plugging this in with the assumed bound on $\left|\hat{\pi} - \frac{1}{2}\right|$ completes the proof.
\end{proof}
Now, we use this Lemma (along with some of our previous Lemmas) to show that for any fixed $t>0$ if our variables are subgaussian with some other minor conditions, then for $n\rightarrow \infty$ and $\log p/n\rightarrow 0$ with probability approaching $1$, none of our permuted statistics will be larger than $t$, or in other words our estimated FDR will converge to $0$.
\begin{proof}[{\bf Proof of Theorem~6.2}]
First we choose an arbitrary $\epsilon_p >0$ and $t>0$. If we consider Lemma~$7.1$, we see that if we find some $\delta >0$ satisfying
\begin{equation}\label{eq:need}
\left\|R_{\textrm{perm}} - \hat{R}_{\textrm{perm}:m}\right\|_{\infty} \leq \delta
\end{equation}
for $m=1,2$ with probability at least $1-\epsilon_p$ and
\begin{equation}\label{eq:bd}
\frac{2\delta}{1-(\rho_{\max} + \delta)^2} \leq t
\end{equation}
then we have satisfied our claim. Because, $\rho_{\max} < 1$, there exists some $\delta >0$ satisfying \eqref{eq:bd}. Now, we first note that, for $n$ sufficiently large, standard concentration inequalities give us that
\[
\left|\hat{\pi} - \frac{1}{2}\right| \leq \delta/12
\]
with probability greater than $1 - \epsilon_p$. If we apply Lemma~$7.5$ with this bound on $\hat{\pi}$ and combine the probabilities with the union bound, we get that for $n$ sufficiently large \eqref{eq:need} is violated with at most probability $2\epsilon_p$. This completes our proof.
\end{proof}
\section{Acknowledgments}
We would like to thank Jonathan Taylor and Trevor Hastie for their helpful comments and insight.
\bibliographystyle{abbrvnat}
|
{
"timestamp": "2012-06-29T02:00:43",
"yymm": "1206",
"arxiv_id": "1206.6519",
"language": "en",
"url": "https://arxiv.org/abs/1206.6519"
}
|
\section*{Introduction}
\label{sec:introduction}
This paper is concerned with the Crepant Resolution Conjecture for Donaldson-Thomas (DT) invariants as stated by Bryan-Cadman-Young \cite[Conjectures 1 and 2]{crc}.
The goal is to understand the relationship between the DT invariants of a CY3 orbifold $\orb{X}$, satisfying the hard Lefschetz condition, and the DT invariants of a natural crepant resolution $Y \to X$ of its coarse moduli space $X$.
Concretely \eqref{star}, we relate counting invariants of $\orb{X}$ and $Y$.
The proof employs a derived equivalence between $\orb{X}$ and $Y$ (worked out in \cite{chentseng}), which is a ``global'' version of the McKay correspondence of Bridgeland-King-Reid \cite{bkr}.
We prove that the image of the heart $\Coh(\orb{X})$ under this equivalence is Bridgeland's category of perverse coherent sheaves $\Per(Y/X)$ \cite{tomflops}.
Before writing down the formula in symbols, it is profitable to spend a few words on the setup of the conjecture.\footnote{The reader interested in more background on DT theory (and curve-counting in general) could start from \cite{13}.}
Given a smooth and projective Calabi-Yau\footnote{\label{sign}For us \emph{Calabi-Yau} means having trivial canonical bundle $\omega_M \cong \O_M$ and torsion fundamental group $H^1(M,\O_M)=0$.} threefold $M$, we can define the DT invariants of $M$ as weighted Euler characteristics\footnote{There is a minor sign issue in this definition. We expand upon it in Remarks \ref{sign issue crc} and \eqref{sign issue again crc}. A quick inspection will show that the main formulae we prove hold regardless of sign conventions.}
\begin{align*}
\DT_M(\beta,n) := \chi_\text{top} \left( \Hilb_M(\beta,n), \nu \right) = \sum_{k \in \mathbb{Z}} k \chi_{\text{top}} \left( \nu^{-1}(k) \right)
\end{align*}
where $\chi_\text{top}$ is the topological Euler characteristic, $\beta \in N_1(M)$ is the homology\footnote{A precise definition will be given in Subsection \ref{reminder}.} class of a curve, $n$ is an integer, $\Hilb_M(\beta,n)$ is the Hilbert (or Quot) scheme parameterising quotients of $\O_M \twoheadrightarrow E$ of class $(\ch_0 E, \ch_1 E, \ch_2 E,\ch_3 E) = (0,0,\beta,n)$ and $\nu$ is Behrend's microlocal function \cite{behrend}.
We formally package these numbers into a generating series.
\begin{align*}
DT(M) := \sum_{(\beta,n) \in N_1(M) \oplus \mathbb{Z}} DT_M(\beta,n) q^{(\beta,n)}
\end{align*}
Taking Chern characters (and using \cite[Lemma 2.2]{tomcc}) we can replace $N_1(M) \oplus \mathbb{Z}$ with the numerical Grothendieck group.
To be precise, we let $N(M)$ be the K-group of coherent sheaves on $M$ modulo numerical equivalence and we define $F_1N(M)$ to be the subgroup spanned by sheaves supported in dimension at most one.
It follows that $DT(M)$ can alternatively be indexed by $F_1N(M)$:
\begin{align*}
DT(M) = \sum_{\alpha \in F_1N(M)} DT_M(\alpha) q^\alpha
\end{align*}
and we will switch between one indexing and the other depending on circumstances.
There is also a subgroup $F_0N(M)$ spanned by sheaves supported in dimension zero and we can define
\begin{align*}
DT_0(M) := \sum_{\alpha \in F_0N(M)} DT_M(\alpha) q^\alpha.
\end{align*}
Let now $\orb{X}$ be a projective Calabi-Yau orbifold of dimension three and let $X$ be its coarse moduli space.
By \cite{bkr,chentseng} there is a crepant resolution $Y \to X$ of $X$ given by an appropriate Hilbert scheme of points of $\orb{X}$.
\begin{center}
\begin{tikzpicture}
\matrix (m) [matrix of math nodes, row sep=3em, column sep=3em, text height=1.5ex, text depth=0.25ex]
{
Y & & \orb{X} \\
& X & \\
};
\path[->,font=\scriptsize]
(m-1-1) edge node[auto,swap]{$f$} (m-2-2)
(m-1-3) edge node[auto]{$g$} (m-2-2)
;
\end{tikzpicture}
\end{center}
The global McKay correspondence tells us that, moreover, $Y$ and $\orb{X}$ are derived equivalent via Fourier-Mukai transforms
\begin{align*}
\Phi: D(Y) \rightleftarrows D(\orb{X}):\! \Psi
\end{align*}
inducing isomorphisms between the corresponding (numerical) K-groups.
We also assume that the fibres of $f$ are at most one-dimensional.\footnote{By \cite[Lemma 24]{bg}, a case-by-case analysis shows that this is equivalent to $\orb{X}$ satisfying the hard Lefschetz condition.}
We denote by $F_\text{mr}N(\orb{X})$ the image of $F_1N(Y)$ via $\Phi$ and define\footnote{The subscript $_\text{mr}$ stands for multi-regular, see \cite{crc}.}
\begin{align*}
DT_\text{mr}(\orb{X}) := \sum_{\alpha \in F_\text{mr}N(\orb{X})} DT_{\orb{X}}(\alpha) q^\alpha
\end{align*}
where the DT number of class $\alpha$ for $\orb{X}$ is defined in the same way, by taking the weighted Euler characteristic of the Hilbert scheme parameterising quotients of $\O_\orb{X}$ of class $\alpha$.
Unfortunately, at this point an ugly technical condition creeps in.
We also need some \emph{partial} DT numbers $\text{DT}^\partial_{\mr}(\orb{X})$ which are given by taking the weighted Euler characteristic of some \emph{open} subspaces of the $\Hilb_\orb{X}(\alpha)$.
We will come back to this subtle point below.
See also Remark \ref{remark}.
The main formula we prove (Corollary \ref{mr=exc}) is
\begin{align}\label{star}\tag{$\bigstar$}
\text{DT}^\partial_{\mr}(\orb{X}) = \frac{DT_\text{exc}^\vee(Y) DT(Y)}{DT_0(Y)}
\end{align}
where\footnote{The subscript $_\text{exc}$ stands for exceptional locus, notice that in the definition of $DT_\text{exc}$ we only sum over the classes $\beta$ such that $f_*\beta = 0$.}
\begin{align*}
DT_\text{exc}^\vee(Y) := \sum_{\substack{(\beta,n) \in N_1(Y) \oplus \mathbb{Z} \\ f_* \beta = 0}} DT_Y(-\beta,n)q^{(\beta,n)}
\end{align*}
and where we implicitly identify $F_\text{mr}N(\orb{X})$ with $F_1N(Y)$ via $\Psi$.
The rigorous meaning of this identity will be made precise in Remark \ref{technical}.
To relate the above formula with Conjectures 1 and 2 in \cite{crc} we need two more objects: the subgroup $F_0N(\orb{X})$ of the numerical K-group of $\orb{X}$ spanned by sheaves supported in dimension zero and the subgroup $F_\text{exc}N(Y) \subset F_1N(Y)$ spanned by sheaves whose support is contracted to a point by $f$.
Notice that $F_\text{exc}N(Y)$ is the image of $F_0N(\orb{X})$ via $\Psi$ and $F_\text{exc}N(Y)$ is also the image (via the inverse of the Chern character morphism) of classes $(\beta,n)$ such that $f_* \beta = 0$.
Using this observation one sees (Corollary \ref{zero=exc}) that
\begin{align}\label{conj2}\tag{$\bigstar\bigstar$}
\text{DT}^\partial_0(\orb{X}) = \frac{DT_\text{exc}(Y)DT_\text{exc}^\vee(Y)}{DT_0(Y)}
\end{align}
where
\begin{align*}
DT_\text{exc}(Y) = \sum_{\alpha \in F_\text{exc}N(Y)}DT_Y(\alpha)q^\alpha
= \sum_{\substack{(\beta,n) \in N_1(Y) \oplus \mathbb{Z} \\ f_* \beta = 0}} DT_Y(\beta,n)q^{(\beta,n)}.
\end{align*}
The formula \eqref{conj2} becomes formally identical (with the exception of the superscript $\partial$) to \cite[Conjecture 2]{crc} by using \cite[Theorem 1.1 (b)]{tomcc}.
Plugging \eqref{conj2} in \eqref{star} we obtain
\begin{align*}
\frac{\text{DT}^\partial_\text{mr}(\orb{X})}{DT_0(\orb{X})} = \frac{DT(Y)}{DT_\text{exc}(Y)}
\end{align*}
which is twin to \cite[Conjecture 1]{crc}.
\subsubsection*{A sketch of the proof} The key result is identifying the image (via $\Psi$) of $\Coh (\orb{X})$ inside $D(Y)$.
It turns out that $\Psi(\Coh(\orb{X}))$ is none other than Bridgeland's heart of perverse coherent sheaves $\Per(Y/X)$.
The relationship between $\Per(Y/X)$ and DT invariants was studied in \cite{cala} (and previously in \cite{todaflops}).
As $\O_Y \in \Per(Y/X)$ one has a \emph{perverse Hilbert scheme} $\PHilb_{Y/X}(\alpha)$ parameterising quotients of $\O_Y$ in $\Per(Y/X)$ of numerical class $\alpha$.
One can then define
\begin{align*}
DT_{Y/X}(\alpha) := \chi_\text{top}\left( \PHilb_{Y/X}(\alpha) \cap \PHilb_{\leq 1}(Y/X), \nu \right)
\quad \text{ and } \quad
DT(Y/X) := \sum_{\alpha \in F_1N(Y)} DT_{Y/X}(\alpha) q^\alpha.
\end{align*}
In \cite{cala} (see also \cite{todaflops}) the following relation between $DT(Y/X)$ and ordinary DT invariants was proved.\footnote{This is slightly imprecise, there is a sign issue which we explain in Remarks \ref{sign issue crc} and \ref{sign issue again crc}.}
\begin{align}\label{flopfo}\tag{$0$}
DT(Y/X) = \frac{DT_\text{exc}^\vee (Y) DT(Y)}{\DT_0(Y)}
\end{align}
Finally, the Fourier-Mukai transform $\Psi$ not only identifies $\Coh(\orb{X})$ with $\Per(Y/X)$ but also the corresponding Hilbert schemes, so that we have $\Hilb_\orb{X}(\alpha) = \PHilb_{Y/X}(\psi({\alpha}))$.
Again, the technical issue mentioned earlier creeps in.
To define the perverse DT invariants one needs to pass to the open subset of $\PHilb_{Y/X}(\psi(\alpha)) \cap \PHilb_{\leq 1}(Y/X)$, where the latter is the moduli space of quotients of $\O_Y$ in $\Per(Y/X)$ which are supported in dimension at most one.
\emph{A priori}, a quotient $\O_Y \twoheadrightarrow P$ might be such that $[P] \in F_1N(Y)$ while $\dim \supp P = \dim \left(\supp H^{-1}(P) \cup \supp H^0(P)\right) > 1$.
We will come back to this in Remark \ref{remark}.
The remedy is to modify the DT invariants on the orbifold side.
For $\psi(\alpha) \in F_1N(Y)$ we define $\Hilb_\orb{X}^\partial(\alpha)$ to be the preimage under $\Psi$ of $\PHilb_{Y/X}(\psi(\alpha)) \cap \PHilb_{\leq 1}(Y/X)$.
The partial DT invariants of $\orb{X}$ are defined by taking the weighted Euler characteristics of these latter moduli spaces.
We then obtain
\begin{align*}
\text{DT}^\partial_\text{mr}(\orb{X}) = DT(Y/X)
\end{align*}
which implies \eqref{star}.
We mention in passing that the arguments contained in this paper morally work just as well in the quasi-projective setting.
Unfortunately, however, the identity \eqref{flopfo} is currently only available for $Y$ projective.
After the first version of this paper was uploaded on the arXiv, an article by David Steinberg \cite{david} also appeared.
Steinberg has been laying siege to the same conjecture using an interesting \emph{relative stable pairs} approach, which involves a different t-structure.
\subsubsection*{Acknowledgements} The author would like to thank Tom Bridgeland for precious help, Dominic Joyce for useful conversations, Arend Bayer for sharing a preliminary version of an upcoming paper and Jim Bryan and David Steinberg for sharing their ideas.
\subsubsection*{Structure of the paper} The paper is divided into two sections.
The first one is the core, as it contains the proof of the fact that $\Coh (\orb{X})$ is sent to $\Per(Y/X)$ via the derived equivalence.
In the second section we apply this result to Donaldson-Thomas invariants.
\subsubsection*{Conventions} We work over the field of complex numbers $\mathds{C}$. For a scheme (or stack) $M$, $D(M)$ will denote the \emph{bounded} derived category of coherent $\O_M$-modules.
\section{The Equivalence between $\Per(Y/X)$ and $\Coh (\orb{X})$}
\label{sec:the_equivalence_between_per_y_x_and_coh_x_}
We work in the following setup.
\begin{situ}\label{situ1}
Let $\orb{X}$ be a smooth, quasi-projective, Deligne-Mumford stack of dimension $n$.
Assume the canonical bundle $\omega_{\orb{X}}$ to be Zariski-locally trivial and denote by $X$ the coarse moduli space of $\orb{X}$.
\end{situ}
\begin{rmk}
The bundle $\omega_\orb{X}$ on $\orb{X}$ is \emph{Zariski}-locally trivial if there exists a Zariski open cover $\orb{X}' \to \orb{X}$ (where we allow $\orb{X}'$ to be a \emph{stack}) such that the restriction $\omega_\orb{X}\vert \orb{X}'$ is trivial.
This is a technical condition which, by working locally on the coarse space $X$, allows us to reduce to the setting of \cite{bkr}.
In fact, in the case where $\orb{X} = [V/G]$, it amounts to requiring that the canonical bundle of $V$ be $G$-equivariantly locally trivial.
This condition seems to be missing in \cite{chentseng}.
\end{rmk}
It is beneficial to recall the framework of \cite{chentseng}.
A candidate for a resolution of $X$ (and a replacement for the equivariant Hilbert scheme found in \cite{bkr}) is given by the irreducible component $Y$ of the Hilbert scheme $\Hilb (\orb{X})$ containing the non-stacky points of $\orb{X}$.\footnote{It is probably helpful to remark that for a stack $\orb{X}$ there might be some ambiguity in the term \emph{Hilbert scheme} (see \cite{rydh}). However, we shall always interpret Hilbert schemes as Quot functors, which for Deligne-Mumford stacks were studied by Olsson and Starr \cite{os}.}
The morphism $g\colon \orb{X} \to X$ induces a morphism $\Hilb(\orb{X}) \to \Hilb(X)$ and, by restriction, a morphism $f\colon Y \to X$. We draw a diagram.
\begin{center}
\begin{tikzpicture}
\matrix (m) [matrix of math nodes, row sep=3em, column sep=3em, text height=1.5ex, text depth=0.25ex]
{
& Y \times \orb{X} &\\
Y & &\orb{X}\\
& X &\\
};
\path[->,font=\scriptsize]
(m-2-1) edge node[auto,swap]{$f$} (m-3-2)
(m-2-3) edge node[auto]{$g$} (m-3-2)
(m-1-2) edge node[auto,swap]{$\pi_Y$} (m-2-1)
edge node[auto]{$\pi_\orb{X}$} (m-2-3)
;
\end{tikzpicture}
\end{center}
Under the additional assumption that $Y\times_X Y$ is at most of dimension $n+1$ it is proved in \cite{chentseng} that $Y$ is smooth and that $f$ is a crepant resolution.
Furthermore, the scheme $Y$ represents a moduli functor and its corresponding universal object is a quotient $\O_{Y \times \orb{X}} \twoheadrightarrow \O_\orb{Z}$.
Finally, it is shown that one has a Fourier-Mukai equivalence $D(Y) \simeq D(\orb{X})$ with kernel given by $\O_\orb{Z}$.
We recall three key results involved in the proof: the Hilbert scheme $\Hilb (\orb{X})$ commutes with \'etale base-change on $X$ \cite[Proposition 2.3]{chentseng}; \'etale-locally on $X$ the space $\orb{X}$ is isomorphic to a quotient stack $[V/G]$, with $V$ smooth and affine and $G$ a finite group (whose coarse space is thus the quotient $V/G$) \cite[Lemma 2.2.3]{vista}; the Hilbert scheme of $[V/G]$ is isomorphic to Nakamura's $G$-equivariant Hilbert scheme $G\text{-}\Hilb (V)$ \cite[Lemma 2.2]{chentseng}.
Exploiting these facts one may reduce to \cite{bkr}, as checking that the given kernel produces an equivalence may be done locally \cite[Proposition 3.3]{chentseng}.
\begin{rmk}\label{otherperversity}
As is usual with integral transforms, the kernel $\O_\orb{Z}$ may be interpreted as giving a functor in two different directions.
The standard Mukai-implies-McKay convention is to take $\O_\orb{Z}$ to define a functor ${\Phi}\colon D(Y) \to D(\orb{X})$ \cite{bkr,chentseng}.
To deal with a technical issue (caused by \cite{cala}), we will also consider $\hat{\Phi} = \mathbb{D}\Phi\mathbb{D} \colon D(Y) \to D(\orb{X})$, where $\mathbb{D} = \R\underline{\Hom}(-,\O)$ is the duality functor.
We denote by $\Psi$ the inverse of $\Phi$ and by $\hat{\Psi}$ the inverse of $\hat{\Phi}$.
When $Y$ and $X$ are projective, the relationship between $\Phi$ and $\hat{\Phi}$ is quite simple, as $\hat{\Psi}$ is given by the Fourier-Mukai transform with kernel $\O_\orb{Z}$ (this is a standard consequence of \cite[Propositions 1.13 and 1.15]{nahm}).
\end{rmk}
We now briefly remind the reader of Bridgeland's heart of perverse coherent sheaves \cite{tomflops}.
In some sense, it is a reflection of the ambiguity revolving around the kernel $\O_\orb{Z}$ that we consider both the $-1$ and $0$ perversity.
The category $\perv{p}{\Per(Y/X)}$ of \emph{perverse coherent sheaves} of \emph{perversity} $p \in \{-1,0\}$ consists of those complexes $E \in D(Y)$ satisfying
\begin{itemize}
\item $\R f_* E \in \Coh (X)$,
\item $\Ext^{-i}_Y (E, C) = 0 = \Ext_Y^{-i}(C,E)$, for all $i > p$ and all $C \in \Coh (Y)$ such that $\R f_* C = 0$.
\end{itemize}
The rest of this section is devoted to the proof of the following statement.
\begin{thmm}\label{hearts}
Assume to be working in Situation \ref{situ1} and assume in addition $f$ to have relative dimension at most one.
Then the equivalence $\Phi$ between $D(Y)$ and $D(\orb{X})$ restricts to an equivalence of abelian categories between $\perv{0}{\Per}(Y/X)$ and $\Coh (\orb{X})$, while the equivalence $\otwit{\Phi}$ restricts to an equivalence between $\perv{-1}{\Per(Y/X)}$ and $\Coh(\orb{X})$.
\end{thmm}
\begin{rmk}
Notice that the condition $\dim Y \times_X Y \leq n+1$ follows automatically from the condition on the fibres of $f$.
\end{rmk}
In particular $\perv{0}{\Per(Y/X)}$ is equivalent to $\perv{-1}{\Per(Y/X)}$.
We also point out that the composition $\otwit{\Phi}{\Phi}^{-1}$ gives a non-trivial autoequivalence of $D(\orb{X})$, which seems related to the \emph{window shifts} of Donovan-Segal \cite{window}.
It might be worthwhile to compute this equivalence in explicit examples.
Let us now begin the proof of the theorem, which will be divided into small steps.
We start by considering ${\Phi}$.
\smallskip
\stepcounter{steps}\paragraph{\pphont{Step \arabic{steps}}} Given an object of the derived category, membership of either of the categories in question can be checked \'etale-locally on $X$ \cite[Proposition 3.1.6]{vdb}.
Thus, by base-changing over \'etale patches of $X$, we can reduce to the case where $X$ is affine and furthermore $\orb{X} = [V/G]$ with $V$ a smooth affine scheme and $G$ finite.
Moreover, the functors ${\Phi}$ and ${\Psi}$ (being Fourier-Mukai) commute with this base-change \cite[Proposition 6.1]{nahm}.
\smallskip
\stepcounter{steps}\paragraph{\pphont{Step \arabic{steps}}} It suffices to prove ${\Psi}(\Coh (\orb{X})) \subset \qerv{0}{\Per}(Y/X)$ because of the following well-known trick.
\begin{lem}
Let $\curvy{A}$ and $\curvy{B}$ be two hearts relative to two bounded t-structures in a triangulated category.
Then $\curvy{A} \subset \curvy{B}$ if and only if $\curvy{B} \subset \curvy{A}.$
\end{lem}
\begin{prf}
Given an object $E$ let us denote by $H_\curvy{A}^i(E)$ (respectively $H_\curvy{B}^i(E)$) the $i$-th cohomology object relative to $\curvy{A}$ (resp.~$\curvy{B}$).
Assume $\curvy{A} \subset \curvy{B}.$
Let $E \in \curvy{B}$.
As $E$ already lies in $\curvy{B}$ we have $E \simeq H^0_\curvy{B}(E)$ and $H^i_\curvy{B}(E) = 0$ for $i \neq 0.$
Consider now the cohomology filtration of $E$ relative to $\curvy{A}$.
As objects of $\curvy{A}$ are also in $\curvy{B}$, this filtration is also a filtration relative to $\curvy{B}$.
By uniqueness of the cohomology objects we have $H^i_\curvy{A}(E) = H^i_\curvy{B}(E) = 0$ for $i\neq 0.$
Thus, $E \in \curvy{A}$.
\end{prf}
\smallskip
\stepcounter{steps}\paragraph{\pphont{Step \arabic{steps}}} To prove the mentioned inclusion we will exhibit two systems of generators (see definition below), one for $\qerv{0}{\Per}(Y/X)$ and one for $\Coh (\orb{X})$, and show that elements of the first system are sent to the second.
\begin{defn}
Let $\cat{D}$ be a triangulated category and let $\curvy{A}$ be the heart of a bounded t-structure.
A collection $\curly{P}$ of objects of $\curvy{A}$ is a \emph{system of projective generators} if, for all $A \in \curvy{A} \setminus \{0\}$ and all $P \in \curly{P}$, $\Ext_\cat{D}^\bullet(P,A)$ is concentrated in degree zero and for all $A \in \curvy{A}$ there exists $P_A \in \curly{P}$ such that $\Hom_\cat{D}(P_A,A) \neq 0$.
\end{defn}
By \cite[Lemma 3.2.4]{vdb}, when $X$ is affine, we have a system of generators $\curly{P}$ for $\qerv{0}{\Per}(Y/X)$ consisting of vector bundles $P$ such that
\begin{itemize}
\item $\R^1f_*P = 0$,
\item $P^\vee$ is generated by global sections.
\end{itemize}
For $\Coh (\orb{X})$ we also have a nice system of generators.
\begin{lem}
The collection $\curly{Q}$ of vector bundles on $\orb{X}$ is a system of generators for $\Coh (\orb{X})$.
\end{lem}
\begin{prf}
As we are working in the case $\orb{X} = [V/G]$, it is easy to reduce the problem to bundles on $V$.
In fact, coherent sheaves on $\orb{X}$ are $G$-equivariant coherent sheaves on $V$.
Given an equivariant vector bundle $P$ and an equivariant sheaf $E$ on $V$ we have that $G\text{-}\Ext^i_V(P,E) = \Ext_V^i(P,E)^G$, where the latter is the $G$-invariant part \cite[Section 4.1]{bkr}.
As $V$ is affine, these groups vanish for $i>0$.
Fix now an equivariant sheaf $E$, we want to find an equivariant vector bundle $P$ such that $\Hom_V(P,E)^G \neq 0$.
By \cite[Lemma 4.1]{bkr} $\Hom_V(P,E)$ splits as a direct sum of $\Hom_V(P\otimes \rho, E)^G \otimes \rho$, where $\rho$ ranges among the irreducible representations of $G$.
The claim thus follows as $P \otimes \rho$ is a vector bundle.
\end{prf}
\stepcounter{steps}\paragraph{\pphont{Step \arabic{steps}}} We now conclude the proof by showing that elements of $\curly{P}$ are sent to elements of $\curly{Q}$.
First we remark that we can check whether a complex on $\orb{X} = [V/G]$ is a vector bundle by pulling back to the \'etale atlas $V \to [V/G]$.
Thus, if $P \in \curly{P}$, we are interested in the pullback of ${\Phi}(P)$ to $V$.
This allows us to reduce to the setup of \cite{bkr}, where one has the following diagram.
\begin{center}
\begin{tikzpicture}
\matrix (m) [matrix of math nodes, row sep=3em, column sep=3em, text height=1.5ex, text depth=0.25ex]
{&Z&\\
&Y\times V&\\
Y&&V\\
&X&\\
};
\path[->,font=\scriptsize]
(m-1-2) edge[bend right=20] node[auto,swap]{$p$}(m-3-1)
edge[bend left=20] node[auto]{$q$}(m-3-3)
(m-1-2) edge node[auto]{$i$} (m-2-2)
(m-2-2) edge node[auto,swap]{$\pi_Y$} (m-3-1)
edge node[auto]{$\pi_V$} (m-3-3)
(m-3-1) edge node[auto,swap]{$f$}(m-4-2)
(m-3-3) edge node[auto]{$g$}(m-4-2)
;
\end{tikzpicture}
\end{center}
Here $Z$ is the universal $G$-cluster for the action of $G$ on $V$, $q$ and $f$ are proper and birational, $p$ and $g$ are finite and $p$ is also flat.
Moreover, the quotient $\O_{Y \times V} \twoheadrightarrow \O_Z$ is precisely the pullback, under the morphism $Y \times V \to Y \times [V/G] = Y \times \orb{X}$, of the universal quotient $\O_{Y \times \orb{X}} \twoheadrightarrow \O_\orb{Z}$, which we used to define ${\Phi}$.
It follows that applying ${\Phi}$ followed by pulling back to $V$ is the same as applying $\R q_* p^*$.
We have thus reduced our final step to checking that, given an element $P \in \curly{P}$, the complex $\R q_* p^* P$ is actually a vector bundle.
\begin{lem}
Let $P \in \Coh(Y)$ satisfy $\R^1 f_* P = 0$.
Then $\R q_* p^* P \in \Coh(\orb{X})$.
\end{lem}
\begin{prf}
Notice that $\R q_* p^* P = \R \pi_{V,*} i_* p^* P = \R \pi_{V,*} (\pi_Y^* P \otimes \O_Z)$, where we made the standard identification $\O_Z = i_* \O_Z$.
We point out that, as a consequence of our assumption on $f$, $\pi_{V,*}$ is of homological dimension at most one (we remind the reader that we work under the reduction done in Step 1, in particular $X$ is affine).
By tensoring the quotient $\O_{Y \times V} \twoheadrightarrow \O_Z$ with $\pi_Y^*P$ we produce a surjection $\pi_Y^*P \twoheadrightarrow \pi_Y^* P \otimes \O_Z$.
Applying $\pi_{V,*}$ yields a surjection $\R^1\pi_{V,*} \pi_Y^* P \twoheadrightarrow \R^1 \pi_{V,*} (\pi_Y^* P \otimes \O_Z)$.
But $\R^1 \pi_{V,*} \pi_Y^* P = H^1(Y,P) \otimes_\mathds{C} \O_V$ and $H^1(Y,P) = 0$ as $\R^1f_* P = 0$, hence the claim.
\end{prf}
\begin{lem}
Let $P \in \curly{P}$, then $\R q_* p^* P$ is a vector bundle on $V$.
\end{lem}
\begin{prf}
We know that the dual of $P$ is generated by global sections, hence there exists a short exact sequence
\begin{align*}
K \hookrightarrow \O^{\oplus m}_Y \twoheadrightarrow P^\vee.
\end{align*}
From the fact that $P$ and $\O_Y$ are vector bundles it follows that $K$ is also a vector bundle.
We therefore have a dual sequence
\begin{align*}
P \hookrightarrow \O^{\oplus m}_Y \twoheadrightarrow K^\vee.
\end{align*}
It follows from the previous lemma, plus the fact that $q_* \O_Z = \O_V$, that applying $\R q_* p^*$ yields an exact sequence
\begin{align*}
q_* p^* P \hookrightarrow \O^{\oplus m}_V \twoheadrightarrow q_* p^* K^\vee.
\end{align*}
To prove our claim it suffices to check that $\Ext^1_V(q_*p^*P,M) = 0$ for all modules $M$ on $V$.
By the above short exact sequence this is the same as showing that $\Ext^2_V(q_*p^* K^\vee,M) = 0$ for all modules $M$.
Using Grothendieck duality
for $q$ we have
\begin{align*}
\Ext_V^2(q_*p^* K^\vee,M)
= \Ext_Z^2(p^*K^\vee,q^!M)
= H^2(Z,p^*K \otimes q^! M).
\end{align*}
The scheme $Z$ admits a finite and flat map to a smooth variety ($p\colon Z \to Y$) thus it is Cohen-Macaulay.
Moreover, as $\dim Z - \dim V = 0$ and $q$ is of finite tor-dimension, the complex $q^!M$ is concentrated in non-positive degrees.
As our assumption on $f$ implies that $H^i(Z,E) = 0$ for all $i > 1$ and all sheaves $E$, the hypercohomology spectral sequence tells us that $H^2(Z,p^*K \otimes q^!M) = 0$.
Hence we are done.
\end{prf}
The previous lemma concludes the first half of the proof.
As is often the case, the second half is much shorter than the first.
In fact, to prove the statement for $\perv{-1}{\Per(Y/X)}$ and $\otwit{\Phi}$, one need only notice the following:
\begin{itemize}
\item $\otwit{\Phi} = \mathbb{D} {\Phi} \mathbb{D}$,
\item the dual system $\curly{P}^\vee = \mathbb{D} \curly{P}$ is a system of generators for $\perv{-1}{\Per(Y/X)}$ \cite[3.2.3]{vdb},
\item the system $\curly{Q}$ is self-dual $\mathbb{D} \curly{Q} = \curly{Q}$.
\end{itemize}
This concludes the proof and we can now move on to comparing the DT invariants of $\orb{X}$ and $Y$.
\begin{rmk}\label{o}
For the next section, it will be important to know that $\Phi(\O_Y) = \O_\orb{X}$.
We already know that $\Phi(\O_Y)$ is a vector bundle given by $\R q_*\O_\orb{Z}.$
By restricting to the smooth locus of $X$ (viz.~to an open where $\Phi$ is the identity) we see that $\R q_* \O_\orb{Z}$ is in fact a line bundle.
In turn this implies that $\Phi(\O_Y) = \O_\orb{X}$ as the unit $\O_\orb{X} \to \R q_* q^* \O_\orb{X}$ is an isomorphism.
The same statement obviously holds for $\hat{\Phi} = \mathbb{D} \Phi \mathbb{D}$ as well.
\end{rmk}
\begin{rmk}\label{usefulcommute}
It can be useful to know that when $Y$ and $X$ are projective the equivalences described above commute with pushing down to $X$.
For example, let us check that $g_* \Phi = \R f_*$.
We have $g_* \Phi = \R f_* \R p_* p^*$.
If we proved that $\R p_* \O_\orb{Z} = \O_Y$, then by the projection formula we would be done.
Thankfully, the previous remark together with Remark \ref{otherperversity} already tell us that $\R p_* \O_\orb{Z} = \Psi(\O_\orb{X}) = \O_Y$.
\end{rmk}
\section{The Formula for DT Invariants}
\label{sec:the_formula_for_dt_invariants}
We now impose further restrictions on our spaces.
\begin{situ}\label{situ2}
Recall Situation \ref{situ1} and assume in addition $\orb{X}$ to be projective and of dimension three.
Assume moreover $\orb{X}$ to be Calabi-Yau, i.e.~$\omega_\orb{X} \cong \O_\orb{X}$ and $H^1(\orb{X},\O_\orb{X})=0$.
Finally, assume the crepant resolution $f\colon Y \to X$ of the previous section to have relative dimension at most one.
\end{situ}
\begin{rmk}
We follow the convention where a Deligne-Mumford stack is projective if its coarse moduli space is.
From the assumptions above it follows that $X$ is of dimension three, projective, Gorenstein with quotient singularities and with trivial canonical bundle.
In turn it follows that $Y$ is Calabi-Yau of dimension three and that $X$ has rational singularities, and so $\R f_*\O_Y=\O_X$ \cite{kovacs}.
\end{rmk}
As the functor ${\Phi}$ is more natural from the perspective of the McKay correspondence we shall focus on the zero perversity.
\begin{notation}
We will drop the superscript $^{0}$ from $\perv{0}{\Per(Y/X)} =: \Per(Y/X)$.
\end{notation}
\addtocontents{toc}{\protect\setcounter{tocdepth}{0}}
\subsection{Reminder}\label{reminder}
Let us recall some definitions from the introduction.
We denote by $N(Y)$ the \emph{numerical K-group} of coherent sheaves of $Y$.
We remind ourselves that we can define a bilinear form on $K_0(\Coh (Y))$
\begin{align*}
\chi(E,F) := \sum_k (-1)^k \dim_\mathds{C} \Ext^k_Y(E,F)
\end{align*}
and that $N(Y)$ is obtained by quotienting out its radical.
Inside $N(Y)$ we can single out $F_1N(Y)$, which is the subgroup generated by sheaves supported in dimensions at most one.
We also define $F_{\exc}N(Y)$ to be the subgroup of $F_1N(Y)$ spanned by sheaves with derived pushforward to $X$ supported in dimension zero.
To $Y$ one can also attach the numerical Chow groups $N_*(Y)$, which are the groups of cycles modulo numerical equivalence.
We write $N_{\leq 1}(Y) := N_1(Y) \oplus N_0(Y)$ and recall that $N_0(Y) \cong \mathbb{Z}$.
In \cite[Lemma 2.2]{tomcc} it is shown that the Chern character induces an isomorphism $F_1N(Y) \cong N_{\leq 1}(Y) \cong N_1(Y) \oplus \mathbb{Z}$, which allows us to pass from one group to the other.
Using this identification, $F_{\exc}N(Y)$ can be rewritten as
\begin{align*}
F_{\exc} N(Y) = \left\{ (\beta,n)\in N_1(Y) \oplus \mathbb{Z} \,\middle\vert\, f_*\beta=0 \right\}
\end{align*}
where $f_*$ here stands for the proper pushforward on cycles (the subscript $\exc$ is short for \emph{exceptional}).
For the orbifold $\orb{X}$ we can also define a numerical K-group $N(\orb{X})$.
Inside it lies $F_0N(\orb{X})$, the subgroup spanned by sheaves supported in dimension zero.
The two Fourier-Mukai functors
\begin{align*}
\Phi: D(Y) \rightleftarrows D(\orb{X}): \Psi
\end{align*}
induce an isomorphism on the level of numerical K-groups.
\begin{align*}
\phi: N(Y) \rightleftarrows N(\orb{X}): \psi
\end{align*}
The group $F_0N(\orb{X})$ is sent isomorphically to $F_{\exc}N(Y)$ via $\psi$ and we define $F_{\mr} N(\orb{X})$ to be the image under $\phi$ of $F_1N(Y)$ (the subscript $\mr$ stands for \emph{multi-regular} \cite{crc}).
As it will be useful later, we analogously define $F_0N(Y)$ to be the subgroup spanned by sheaves supported in dimension zero.
\begin{center}
\begin{tikzpicture}
\matrix (m) [matrix of math nodes, row sep=1.5em, column sep=.7em, text height=1.5ex, text depth=0.25ex]
{
F_0N(Y) & F_{\exc}N(Y) & F_1N(Y) &\\
& F_0N(\orb{X}) & F_{\mr}N(\orb{X}) & F_1N(\orb{X}) \\
};
\path[color=white]
(m-1-1) edge node[color=black]{$\subset$} (m-1-2)
(m-1-2) edge node[color=black]{$\subset$} (m-1-3)
edge [color=black,->] node[auto]{\rotatebox{90}{$\sim$}} (m-2-2)
(m-1-3) edge [color=black,->] node[auto]{\rotatebox{90}{$\sim$}} (m-2-3)
(m-2-2) edge node[color=black]{$\subset$} (m-2-3)
(m-2-3) edge node[color=black]{$\subset$} (m-2-4)
;
\end{tikzpicture}
\end{center}
Before we may proceed, a technical remark is in order.
\begin{rmk}\label{sign issue crc}
As mentioned in the introduction, we think of DT invariants as weighted Euler characteristics of the Hilbert scheme of a given Calabi-Yau threefold $M$, where the weight is given by Behrend's constructible function.
The proof of the flop formula in \cite{cala} relies on the technology of motivic Hall algebras of Joyce.
One of the technical points of this approach is that the Hilbert scheme $\Hilb(M)$ has a forgetful morphism $\sigma$ to the stack of coherent sheaves $\orb{M}$.
Thus, on $\Hilb(M)$ there are two candidate constructible functions: the Behrend function $\nu_{\Hilb(M)}$ and the pullback $\mu = \sigma^* \nu_{\orb{M}}$ of the Behrend function on $\orb{M}$.
Fortunately there is a simple relationship between the two, which unfortunately introduces some signs: namely if $\O_M \twoheadrightarrow E$ is a quotient, with $E$ supported in dimension at most one then \cite[Theorem 3.1]{tomcc}
\begin{align*}
\nu_{\Hilb(M)}(\O_M \twoheadrightarrow E) = (-1)^{\chi(E)} \mu(\O_M \twoheadrightarrow E) = (-1)^{\chi(E)} \nu_{\orb{M}} (E)
\end{align*}
where $\chi(E) = \chi(\O_M,E)$ is the Euler characteristic of the sheaf $E$.
Moreover, and this is relevant to the $\text{DT}^\partial$ numbers, if $f\colon U \to Z$ is an open immersion then $\nu_U = f^*\nu_Z$.
\end{rmk}
The \emph{DT number} of $Y$ of class $\alpha \in F_1N(Y)$ is given by
\begin{align*}
\DT_Y(\alpha) := \chi_{\topp}\left( \Hilb_Y(\alpha), \nu \right)
\end{align*}
but for convenience, we give a name to the numbers obtained by weighing with $\mu$ as well, namely
\begin{align*}
\correct{\DT}_Y(\alpha) := \chi_{\topp}\left( \Hilb_Y(\alpha), \mu \right) = (-1)^{\chi(\alpha)} \DT_Y(\alpha).
\end{align*}
In the introduction we also mentioned that we package all these numbers into a generating series
\begin{align*}
\DT(Y) := \sum_{\alpha \in F_1N(Y)} \DT_Y(\alpha)q^\alpha
\end{align*}
and similarly for $\correct{\DT}$.
Recall now the category $\Per(Y/X)$ of perverse coherent sheaves from the previous section.
The structure sheaf $\O_Y$ belongs to $\Per(Y/X)$ and there is a moduli space $\PHilb(Y/X)$ parameterising quotients of $\O_Y$ in $\Per(Y/X)$ \cite[Section 6]{tomflops}.
This space splits into open and closed components $\PHilb_{Y/X}(\alpha)$, for each numerical class $\alpha$, parameterising quotients $\O_Y \twoheadrightarrow P$, with $[P] = \alpha$.
However, there is also an \emph{open} subspace $\PHilb_{\leq 1}(Y/X) \subset \PHilb(Y/X)$ parameterising quotients $\O_Y \twoheadrightarrow P$ with $\dim \supp P \leq 1$, and we define $\PHilb^\partial_{Y/X}(\alpha)$ to be the intersection $\PHilb_{\leq 1}(Y/X) \cap \PHilb_{Y/X}(\alpha) \subset \PHilb_{Y/X}(\alpha)$.
A priori, for $\alpha \in F_1N(Y)$, $\PHilb^\partial_{Y/X}(\alpha)$ might differ from $\PHilb_{Y/X}(\alpha)$.
We can define a \emph{perverse} DT number of $Y$ over $X$ of class $\alpha$ as the weighted Euler characteristic
\begin{align*}
\correct{\DT}_{Y/X}(\alpha) := \chi_{\topp}\left( \PHilb^\partial_{Y/X}(\alpha), \mu \right)
\end{align*}
where $\mu$ is the pullback of the Behrend function of the stack of perverse coherent sheaves on $Y$.
We also collect these numbers into a generating series
\begin{align*}
\correct{\DT}(Y/X) := \sum_{\alpha \in F_1N(Y)} \correct{\DT}_{Y/X}(\alpha) q^\alpha.
\end{align*}
On the orbifold side, we once again define DT numbers by taking weighted Euler characteristics and gather them in a generating series
\begin{align*}
\DT_{\mr}(\orb{X}) := \sum_{\alpha \in F_{\mr}N(\orb{X})} \DT_{\orb{X}}(\alpha) q^\alpha,
\quad \DT_{\orb{X}}(\alpha) := \chi_{\topp}\left( \Hilb_\orb{X}(\alpha), \nu \right).
\end{align*}
\begin{rmk}\label{sign issue again crc}
The analogue of Remark \ref{sign issue crc} for $\orb{X}$ still holds, that is the following identity holds
\begin{align*}
\chi_\text{top} \left( \Hilb_{\orb{X}}(\alpha), \mu \right) = (-1)^{\chi(\alpha)} \chi_\text{top}\left( \Hilb_{\orb{X}}(\alpha), \nu \right)
\end{align*}
where $\mu$ is the pullback of the Behrend function of the stack of coherent sheaves on $\orb{X}$.
To prove this, one can choose an appropriate divisor $D$ on the coarse space $X$, and its pullback to $\orb{X}$ plays the role of $H$ in the proof of \cite[Theorem 3.1]{tomcc}.
The affine $U$ can then be chosen to be an \'{e}tale open in $\orb{X}$, so that \cite[Lemma 3.2]{tomcc} can be applied.
\end{rmk}
Because of this remark, we can define the underlined version of $\DT_{\mr}(\orb{X})$ and the identity above translates to
\begin{align*}
\correct{\DT}_{\orb{X}}(\alpha) = (-1)^{\chi(\alpha)}\DT_\orb{X}(\alpha).
\end{align*}
\begin{rmk}\label{quot}
Form the previous section we know that the Fourier-Mukai equivalences $\Phi$ and $\Psi$ restrict to an equivalence of abelian categories between $\Per(Y/X)$ and $\Coh (\orb{X})$.
Using Remark \ref{o}, which tells us that $\Phi(\O_Y) = \O_\orb{X}$, we have an induced isomorphism of Quot functors (or Hilbert schemes), hence
\begin{align*}
\Hilb_\orb{X}(\alpha) \simeq \PHilb_{Y/X}(\psi(\alpha)).
\end{align*}
We define the open subspace $\Hilb^\partial_\orb{X}(\alpha)$ to agree, under this identification, with $\PHilb^\partial_{Y/X}(\psi(\alpha))$.
By taking weighted Euler characteristics we obtain the analogues $\text{DT}^\partial$, $\underline{\text{DT}}^\partial$ of the generating series for DT invariants.
\end{rmk}
\begin{rmk}\label{technical}
Before we state the theorem, we point out a technical detail.
In \cite{tomcc} the generating series $\DT(Y)$ is interpreted as belonging to an algebra $\mathds{C}[\Delta]_\Phi$ (where $\Delta \subset F_1N(Y)$ is the positive cone of classes $[E]$ with $E \in \Coh(Y)$), whose elements consist of formal series
\begin{align*}
\sum_{(\beta,n) \in \Delta \subset N_1(Y) \oplus \mathbb{Z}} a_{(\beta,n)} q^{(\beta,n)}
\end{align*}
where the $a_{(\beta,n)}$ are complex coefficients such that, for a fixed $\beta$, $a_{(\beta,n)}=0$ for $n$ very negative.
A similar interpretation is given in \cite{cala} for the generating series $\correct{\DT}(Y/X)$, which now belongs to an algebra\footnote{Here the subscripts $\Phi$ and $\Lambda$ are just notation and stand for entirely parallel constraints. Also, to be pedantic, in \cite{cala} $\mathbb{Q}$ was used in place of $\mathbb{C}$. However the latter is obtained by the former by tensoring with $\mathbb{C}$.} $\mathds{C}[\pepe{\Delta}]_\Lambda$ (where $\pepe{\Delta} \subset F_1N(Y)$ is the positive cone of classes $[P]$ with $P \in \Per(Y/X)$), which is the analogue of $\mathds{C}[\Delta]_\Phi$ for $\Per(Y/X)$.
It was remarked in \cite[Remark 3.26]{cala} that the generating series $\correct{\DT}(Y)$ can also be seen as an element of $\mathds{C}[\pepe{\Delta}]_\Lambda$ and all the identities we write down below should be interpreted as taking place within this algebra.
\end{rmk}
In light of Remark \ref{quot} our main theorem is now immediate.
\begin{thmm}\label{main}
Assume to be working in Situation \ref{situ2}.
The following formula holds
\begin{align*}
\correct{\DT}^\partial_{\mr}(\orb{X}) = \correct{\DT}(Y/X)
\end{align*}
after an identification of variables via $\phi$.
In particular, for each $\alpha \in F_1N(Y)$
\begin{align*}
\correct{\DT}_{Y/X}(\alpha) = \correct{\DT}^\partial_\orb{X}(\phi(\alpha)).
\end{align*}
\end{thmm}
\begin{cor}\label{mr=exc}
Assume to be working in Situation \ref{situ2} and recall the identification of variables from the previous theorem.
The following formula is true
\begin{align}\label{onceagain}
\DT^\partial_{\mr}(\orb{X}) = \frac{\DT^\vee_{\exc}(Y)\DT(Y)}{\DT_0(Y)}
\end{align}
where
\begin{align*}
\DT_0(Y) &:= \sum_{n \in F_0N(Y)}\DT_Y(n)q^n, \\%\quad \text{ and} \\
\DT_\text{exc}^\vee(Y) &:= \sum_{\substack{(\beta,n) \in N_1(Y)\oplus \mathbb{Z} \\ f_*\beta = 0}} \DT_Y{(-\beta,n)} q^{(\beta,n)}.
\end{align*}
\end{cor}
\begin{prf}
First we notice that we can get rid of the underlines thanks to Remarks \ref{sign issue crc} and \ref{sign issue again crc}.
From the previous theorem the statement we wish to prove is equivalent to proving that $\correct{\DT}(Y/X)$ is equal to the right hand side of \eqref{onceagain} (modulo the underlines).
What prevents us from simply applying the formula \eqref{flopfo} from the introduction is that in \cite{cala} the result is, strictly speaking, only proved for the minus one perversity.
Nonetheless, we can verify that the context we work in here satisfies the hypothesis of \cite[Remark 1.6]{cala}.
The only thing to prove is that the stack of perverse coherent sheaves is locally isomorphic to the stack of coherent sheaves (via an \emph{exact} functor).
However, using the Fourier-Mukai equivalence $\otwit{\Phi}$, we have that the stack parameterising objects in $\perv{0}{\Per(Y/X)}$ is isomorphic to the stack parameterising objects in $\perv{-1}{\Per(Y/X)}$.
As $\otwit{\Phi}$ is also an exact functor, all the constructions of \cite{cala} go through and \eqref{flopfo} does indeed hold.
\end{prf}
Finally, we reformulate our formula paralleling the Crepant Resolution Conjecture of \cite{crc}.
\begin{cor}\label{zero=exc}
Once again, we assume to be working in Situation \ref{situ2}, while also bearing in mind the identification of variables from the previous theorem.
The following formulae hold.
\begin{align}\label{crepe1}
\DT^\partial_0(\orb{X}) = \frac{\DT_{\exc}^\vee(Y) \DT_{\exc}(Y)}{\DT_0(Y)}
\end{align}
\begin{align}\label{crepe2}
\frac{\DT^\partial_{\mr}(\orb{X})}{\DT^\partial_0(\orb{X})} = \frac{\DT(Y)}{\DT_{\exc}(Y)}
\end{align}
where
\begin{align*}
\text{DT}^\partial_0(\orb{X}) := \sum_{\alpha \in F_0N(\orb{X})} \text{DT}^\partial_\orb{X}(\alpha) q^\alpha, \quad\text{ and }\quad
\DT_{\exc}(Y) := \sum_{\substack{(\beta,n) \in N_1(Y) \oplus \mathbb{Z} \\ f_* \beta = 0}} \DT_Y(\beta,n)q^{(\beta,n)}.
\end{align*}
\end{cor}
\begin{prf}
As previously mentioned, $\psi$ identifies $F_0N(\orb{X})$ with $F_{\exc}N(Y)$, from which we deduce the first identity.
The second is obtained by combining the first identity with \eqref{onceagain}.
\end{prf}
\begin{rmk}\label{remark}
The actual Conjectures 1 and 2 of \cite{crc} are \eqref{crepe2} and \eqref{crepe1} where the $\partial$ symbols do not appear.
The similarity between these is so striking that there must be a hidden relationship lurking in the background.
First off, at the time of writing the author does not know whether or not the inclusion $i\colon \PHilb^\partial_{Y/X}(\alpha) \subset \PHilb_{Y/X}(\alpha)$ is strict.
If $i$ turned out to be the identity then $\text{DT}^\partial = \DT$ and all formulae match up with \cite{crc}.
Secondly (and very unlikely) there might be a corner of the universe of hard Lefschetz CY3 orbifolds where the conjectures in \cite{crc} break down and must be replaced.
The third (and perhaps more likely) hypothesis may be that $i$ is generally a strict inclusion but the two moduli spaces,
as elements in the Hall algebra of $\Per(Y/X)$, are sent to one another by an automorphism which becomes the identity under the integration map.
This might be related to the Bryan-Steinberg \emph{relative stable pair} invariants of \cite{david}.
Steinberg constructs moduli spaces $f\text{-}\Hilb(Y/X)$, which morally parameterise quotients of $\O_Y$ in yet another heart in $D(Y)$.\footnote{We use the word \emph{morally} as when $f$ contracts a divisor to a curve this is no longer true. On the other hand, when $f$ \emph{is} a small resolution then one does get another heart, which turns out to be a tilt of $\Per(Y/X)$. In this (much simpler!) setting the formula for BS invariants can be proved using \cite{cala}.}
Taking weighted Euler characteristics one has new invariants $\text{BS}(Y/X)$ and the main result of \cite{david} is the formula
\begin{align*}
\text{BS}(Y/X) = \frac{\DT(Y)}{\DT_\text{exc}(Y)},
\end{align*}
cf.~\eqref{crepe2}.
One would hope $\text{BS}(Y/X)$ to match up with the PT invariants of $\orb{X}$, which through an orbifold DT/PT correspondence (announced by Arend Bayer a few years ago) would yield the crepant resolution conjecture.
Unfortunately, even in the local $\text{B}{\mathbb{Z}_2}$-gerbe case of \cite{crc} there are relative stable pairs which are not sent to stable pairs on the orbifold under the Fourier-Mukai equivalence.
It is reasonable to suspect that this issue is connected to the appearance of $\partial$ in our formulae.
At any rate, it would at least be worthwhile to describe the moduli space $\Hilb_\orb{X}^\partial$ intrinsically in terms of $\orb{X}$.
\end{rmk}
|
{
"timestamp": "2014-03-20T01:01:40",
"yymm": "1206",
"arxiv_id": "1206.6524",
"language": "en",
"url": "https://arxiv.org/abs/1206.6524"
}
|
\section{Introduction}
In this paper, we compute reachable sets of differential inclusions,
\begin{equation}\label{di1}
\dot x(t)\in F(x(t)),\,\,x(0)=x_0,
\end{equation}
\noindent where $F$ is a continuous set-valued map with compact and convex values.
A solution of the differential inclusion~\eqref{di1} is an
absolutely continuous function $x:[0,T]\rightarrow\mathbb{R}^n$, such that for almost all $t\in [0,T]$,
$x(\cdot)$ is differentiable at $t$ and $\dot x(t)\in F(x(t))$.
The solution set $S_T(x_0) \subset C([0,T],\mathbb{R}^n)$ is defined as
\begin{multline*}
S_T(x_0)=\{x(\cdot) \in C([0,T],\mathbb{R}^n) \,\mid\, x(\cdot)\text{ is a solution of } \dot x(t)\in F(x(t)) \text{ with } x(0)=x_0 \}.
\end{multline*}
The \emph{reachable set} at time $t$, $R(x_0,t) \subset \mathbb{R}^n$, is defined as
\begin{equation*}
R(x_0,t)=\{x(t)\in\mathbb{R}^n\,| x(\cdot) \in S_t(x_0) \} .
\end{equation*}
\noindent In particular, we are interested in higher-order methods for the computation
of a rigorous over-approximation of the reachable set
of a differential inclusion.
Differential inclusions are generalizations of differential equations
having multivalued right-hand sides, see \cite{AC}, \cite{De}, \cite{S}.
They give a mathematical setting for studying differential equations
with discontinuous right-hand sides. In fact, taking a closed, convex hull of the right-hand side,
one obtains a differential inclusion. Solutions of this differential inclusion are known as Filippov solutions of the original differential equation; see \cite{F}.
One important application area for differential inclusions is control theory.
Suppose we are given an interval $[0,T]$, and an absolutely continuous function $x(\cdot)$
which satisfies the inclusion~\eqref{di1}, where $F(x)= f(x,U)=\bigcup_{u\in U} f(x,u)$, for almost all $t\in [0,T]$.
It is known that if the set $U$ is compact and separable,
$f$ is continuous, and $f(x,U)$ is convex for all $x$, then there exists a bounded measurable function $u(t)\in U$,
known as admissible control input, such that $x(t)$ is the solution of the control system,
\begin{equation}\label{controlsystem}
\dot x(t) = f(x(t),u(t)),\,\,\,x(0)=x_0.
\end{equation}
\noindent The proof of the above is given in \cite{AC}, and with slight changes
in the assumptions, also in \cite{N} or \cite{Li}.
On the other hand, it is easy to see that each solution of
a control system~\eqref{controlsystem} for a given admissible control input
is also a solution of a differential inclusion~\eqref{di1}.
Therefore, if a control system is not completely controllable
one may want to compute reachable sets corresponding to all possible inputs ($u(t)\in U$)
which is equivalent to computing a reachable set of a differential inclusion.
Similarly, we obtain a differential inclusion from a noisy system of differential equations
\begin{equation}\label{noisysystem}
\dot x(t) = f(x(t),v(t)),\quad x(0)=x_0,\quad v(t)\in V.
\end{equation}
Although the forms of~\eqref{controlsystem} and~\eqref{noisysystem} are identical, the interpretation is different;
in~\eqref{controlsystem}, the input $u(t)$ can be chosen by the designer, whereas in~\eqref{noisysystem}, the input is determined by the environment.
Differential inclusions can also arise as reduced models of high-dimensional systems of differential equations.
For example, suppose we have a large-scale system given in the form
of differential equation $\dot x(t)=f(x(t))$. In general, it is very
hard to analyse large-scale systems and most of the time performing model reduction
is necessary. This gives a simplified model in the form of $\dot z(t) = h(z(t)) + e(t)$,
where $|e(t)|< \epsilon$ represents the error that occurred while simplifying the model.
For reliability purposes many engineering systems require availability of
verification tools. In order to verify a system, we must
guarantee that an approximate solution will contain the actual solution of the system.
If there is uncertainty in the system, lack of controllability, or just a variety of available dynamics,
one needs to use differential inclusion models.
For verification purposes, one needs to compute over-approximations to the set of solutions.
An important tool in the study of input-affine control systems~\eqref{controlsystem} is based on the Fliess
expansion~\cite{Fl}, in which the evolution over a time-step $h$ is expanded as a power-series in integrals of the input.
A numerical method based on this approach was given in~\cite{GK}.
The method cannot be directly applied to study noisy systems~\eqref{noisysystem}, since for this problem we
need to compute the evolution over all possible inputs, and this point is only briefly addressed.
The first result on the computation of the solution set of a differential inclusion
was given in~\cite{PBV}, who considered Lipschitz differential inclusions,
and gave a polyhedral method for obtaining an approximation
of the solution set $S(x_0)$ to an arbitrary known accuracy.
In the case where $F$ is only upper-semicontinuous with compact, convex values,
it is possible to compute arbitrarily accurate over-approximations to the solution set,
as shown in~\cite{CG}.
Some different techniques and various types of numerical methods have been proposed
as approximations to the solution set of a differential inclusion. For example,
ellipsoidal calculus was used in \cite{KV}, a Lohner-type algorithm
in \cite{KZ}, grid-based methods in \cite{PBV} and \cite{BR}, optimal control
in \cite{BM} and discrete approximations in \cite{DL,Dt,DF}, \cite{G}.
However, these algorithms either do not give rigorous over-approximations, or are
approximations of low-order (e.g. Euler approximations with a first-order single-step truncation error).
Essentially, the only algorithms mentioned above that could give arbitrary accurate error estimates are the ones that use grids.
However, higher order discretization of a state space greatly
affects efficiency of the algorithm. It was noted in \cite{BR} that if one is trying to obtain higher order
error estimates on the solution set of differential inclusions then grid methods should be avoided.
In order to provide an over-approximation of the reachable set of (\ref{di1}),
we compute solutions of an ``approximate'' system
\[
\dot y(t)= f(y(t),w_k(t)), \quad y(t_k)=x(t_k), \ w_k(\cdot)\in W,
\]
\noindent for $t\in [t_k,t_{k+1}]$, and add the uniform error bound on the difference of the two solutions.
We provide formulas for the local error based on Lipschitz constants and bounds on higher-order derivatives.
The method is based on a Fliess-like expansion, and extends the results of~\cite{GK} by providing error estimates which are valid for all possible inputs.
We can obtain improved estimates by the use of the logarithmic norm.
The logarithmic norm was introduced independently in \cite{D}, and \cite{L} in order to derive
error estimates to initial value problems, see also \cite{Sg}. Using the logarithmic norm
is advantageous over the use of Lipschitz constant in the sense that the logarithmic norm
can have negative values, and thus, one can distinguish between forward and reverse time integration,
and between stable and unstable systems. The definition of the logarithmic norm and
a theorem on the logarithmic norm estimate is given in Section \ref{Prel}.
The numerical result given in Section \ref{num} were obtained using the function calculus
implemented in the tool Ariadne~\cite{A} for reachability analysis and verification of hybrid systems.
In particular, we use \emph{polynomial models} for the rigorous approximation of continuous functions.
A polynomial model expresses an approximation to a function in the form of a polynomial (defined over a suitably small domain) plus an interval remainder; polynomial models are essentially the same as the \emph{Taylor models} of~\cite{RBM}.
The paper is organized as follows. In Section \ref{Prel}, we give key ingredients
of the theory used. In Section \ref{Di}, we give mathematical setting for obtaining
over-approximations of the reachable sets of a differential inclusion, and propose an algorithm.
In Section \ref{IAS}, we consider differential inclusions in the form of input-affine systems. We derive
the local error, give formulas for obtaining the error of second and third orders, and
show how to obtain the error of higher-orders. We extend the idea of obtaining over-approximations
for input-affine systems to more general
differential inclusions in Section \ref{GDI}. A
numerical example is given in Section \ref{num}.
We conclude the paper with a discussion on the theory proposed in Section \ref{Disc}.
\section{Preliminaries}\label{Prel}
Below we give several results on differential inclusions and
the computability of their solutions. For further work on the theory of differential inclusions see
\cite{AC}, \cite{De}, \cite{S}, for computability theory see \cite{W}, and for results on computability
of differential inclusions see \cite{PBV}, \cite{CG}.
We canonically use the supremum norm for the vector norm in $\mathbb{R}^n$, i.e.,
for $x \in \mathbb{R}^n$, $\|x\|_\infty=\max \{|x_1|,...,|x_n|\}$.
The corresponding norm for functions $f: D\subset \mathbb{R}^n \rightarrow \mathbb{R}$ is $\|f\|_\infty = \sup_{x\in D}\|f(x)\|_\infty$.
The corresponding matrix norm is
\[
\|Q\|_{\infty} = \max_{k=1,...,n} \Bigl \{ \sum_{i=1}^n |q_{ki}| \Bigr\}.
\]
Given a square matrix $Q$ and a matrix norm $\|\cdot\|$, the corresponding \emph{logarithmic norm} is
\[
\lambda(Q)= \lim_{h\rightarrow 0^+} \frac{\|I + hQ\|-1}{h} .
\]
\noindent There are explicit formulas for the logarithmic norm for several matrix norms,
see \cite{HNW}, \cite{D}. The formula for the logarithmic norm corresponding to the uniform matrix norm that we use is
\[
\lambda_\infty(Q) = \max_k \{ q_{kk} + \sum_{i\neq k} |q_{ki}| \}.
\]
The following theorem on existence of solutions of differential inclusions and its proof can be
found in \cite{De}. Also, a version of the theorem and its proof can be found in \cite{AC}.
\begin{theorem}
Let $D\subset\mathbb{R}^n$ and $F:[0,T]\times D\rightrightarrows \mathbb{R}^n$
be an upper semicontinuous set-valued mapping, with non-empty, compact and convex values.
Assume that $\|F(t,x)\|\le c(1+\|x\|)$, for some constant $c$, is satisfied on $[0,T]$.
Then for every $x_0 \in D$, there exists an
absolutely continuous function $x:[0,T] \rightarrow \mathbb{R}^n$, such that
$x(t_0)=x_0$ and $\dot x(t)\in F(t,x(t))$ for almost all $t\in [0,T]$.
\end{theorem}
\smallskip
\noindent A result on upper-semicomputability of differential inclusions was presented in~\cite{CG}.
\begin{theorem}
Let $F$ be an upper-semicontinuous multivalued function with compact and
convex values. Consider the initial value problem
$\dot x\in F(x)$, $x(0) = x_0$,
where F is defined on some open domain $V\subset \mathbb{R}^n$. Then the solution operator
$x_0 \mapsto S_T(x_0)$ is upper-semicomputable in the following sense:
– Given an enumerator of all tuples $(L,M_1,\dots,M_m)$ such that $F(\bar{L})\subset \cup_{i=1}^{m} M_i$, it is possible to enumerate all tuples $(I, J, K_1, \dots, K_k)$
where $I,K_1,\dots,K_k$ are open rational boxes and $J$ is an open rational interval
such that for every $x_0 \in I$, every solution $\xi$ with $\xi(0) = x_0$ satisfies
$\xi(\bar{J})\subset \cup_{i=1}^k K_i$.
\end{theorem}
In other words, it is possible to approximate the reachable sets arbitrarily accurately given a description of the differential inclusion and an arbitrarily accurate description of the initial state.
\smallskip
The basic construction of our algorithm is based on the following theorem. The theorem and the proof can be found
in \cite[Corollary 1.14.1]{AC}.
\begin{theorem}
Let $f:X\times U\rightarrow X$ be continuous where $U$ is a compact separable
metric space and assume that there exists an interval $I$ and an absolutely continuous
$x:I\rightarrow \mathbb{R}^n$, such that for almost all $t\in I$,
\[
\dot x(t)\in f(x(t),U).
\]
\noindent Then there exists a Lebesgue measurable $u:I\rightarrow U$ such that for almost all $t\in I$,
\[
\dot x(t)= f(x(t),u(t)).
\]
\end{theorem}
\smallskip
We shall need the multidimensional mean value theorem,
which can be found in standard textbooks on real analysis,
e.g., see \cite{Wa}. We use the following form of the theorem.
\begin{theorem}\label{MVT}
Let $V\subset \mathbb{R}^n$ be open, and suppose that $f:\mathbb{R}^n \rightarrow \mathbb{R}^m$
is differentiable on $V$. If $x,x+h\in V$ and $L(x;x+h)\subseteq V$, i.e., the line segment between $x$ and $x+h$
lies in $V$, then
\[
f(x+h)-f(x) = \int_{0}^{1} Df(z(s)) ds\,\cdot h\,
\]
\noindent where $Df$ denotes Jacobian matrix of $f$, $z(s)=x+sh$, and integration is understood component-wise.
\end{theorem}
\smallskip
The following theorem on the logarithmic norm estimate is taken from \cite{HNW}.
\begin{theorem}\label{lnt}
Let $x(t)$ satisfy differential equation $\dot x(t)=f(t,x(t))$ with $x(t_0)=x_0$, where
$f$ is Lipschitz continuous. Suppose that
there exist functions $l(t)$, $\delta(t)$ and $\rho$ such that
$\lambda(Df(t,z(t)))\le l(t)$ for all $z(t)\in \mathrm{conv}\{x(t),y(t)\}$ and
$\|\dot y(t) - f(t,y(t))\|\le \delta(t)$, $\|x(t_0)-y(t_0)\|\le \rho$.
Then for $t\ge t_0$ we have
\[
\|y(t)-x(t)\| \le e^{\int_{t_0}^t l(s)ds}\left( \rho + \int_{t_0}^{t} e^{-\int_{t_0}^s l(r)dr} \delta(s) ds \right).
\]
\end{theorem}
\smallskip
In order to numerically compute the reachable set of a differential inclusion, we need a rigorous way of computing with sets and functions in Euclidean space.
A suitable calculus is given by the \emph{Taylor models} defined in~\cite{MB}:
\begin{definition}\label{tm}
Let $f: D \subset \mathbb{R}^v\rightarrow \mathbb{R}$ be a function
that is $(n+1)$ times continuously partially differentiable on an open set
containing the domain $D$. Let $x_0$ be a point in $D$ and $P$ the $n$-th
order Taylor polynomial of $f$ around $x_0$. Let $I$ be an interval such
that
\[
f(x) - P(x-x_0) \in I \text{ for all } x\in D .
\]
Then we call the pair $(P, I)$ an $n$-th order Taylor model of $f$ around $x_0$ on $D$.
\end{definition}
In Ariadne, we allow arbitrary polynomial approximations, and not just those defined by the Taylor series.
We take $p$ to be a polynomial on the unit domain $[-1,+1]^v$, and pre-compose $p$ by the inverse of
the affine scaling function $s:[-1,+1]^v\rightarrow D$ with $s_i(z_i)=r_i z_i+m_i$.
Instead of using an interval bound for the difference between $f$ and $p$, we take a positive error bound $e$.
We say $(s,p,e)$ is a \emph{scaled polynomial model} for $f$ on the box domain $D$ if $s:[-1,+1]^v\rightarrow D$ is an affine bijection and
\[ \sup_{x\in D} |f(x)-p(s^{-1}(x))| \leq e . \]
In the special case $D=[-1,+1]^v$, the unit box, we speak of a \emph{unit polynomial model} $(p,e)$ satisfying
\( \sup_{z\in [-1,+1]^v} |f(z)-p(z)| \leq e . \)
We use the notation $p\circ s^{-1}\pm e$ to denote the polynomial model $(s,p,e)$.
Polynomial models support a complete function calculus, including the usual arithmetical operations, algebraic and transcendental functions.
Formally, if $\mathrm{op}$ is an operator on functions, then there is a corresponding operator $\widehat{\mathrm{op}}$ on polynomial models satisfying the property that if $\hat{f}_i$ are polynomial models for $f_i$, $i=1,\ldots,n$ on common domain $D$, then $\widehat{\mathrm{op}}(\hat{f}_1,\ldots,\hat{f}_n)$ is a polynomial model for $\mathrm{op}(f_1,\ldots,f_n)$ on $D$.
A full description of polynomial models as used in Ariadne is given in~\cite{CNR}.
For the calculations described in this paper, it is sufficient to consider sets of the form $S=f(D)$ for $D=[-1,+1]^m$ and $f:\mathbb{R}^m\rightarrow\mathbb{R}^n$.
If $p_i\pm e_i$ are unit polynomial models for $f_i$, then
\[\begin{aligned}
S \subset \widehat{S} &= p([-1,+1]^m) \pm e \\
&\qquad =\{ x \in \mathbb{R}^n \mid x_i = p_i(z) + d_i \text{ for some } z\in[-1,+1]^m \text{ and } d\in\mathbb{R}^n,\ |d_i|\leq e_i \} .
\end{aligned}\]
Here, $p:[-1,+1]^m\rightarrow \mathbb{R}^n$ is the polynomial with components $p_i$, and $\pm e$ is the set $\prod_{i=1}^{m}[-e_i,+e_i]$.
The set $\widehat{S}$ is an \emph{over-approximation} to $S$.
Note that by defining polynomials $q_i(z,w)=p_i(z)+e_i w_i$, we have
\[ \widehat{S} = p([-1,+1]^m) \pm e \subset q([-1,+1]^{m+n}) \]
yielding an over-approximation as the polynomial image of the unit box without error terms.
\section{Approximation Scheme}\label{Di}
We consider differential inclusions in the form of noisy differential equations
\begin{equation}\label{cp}
\dot x(t) = f(x(t), v(t)),\,\,\, v(t)\in V,
\end{equation}
\noindent where $x:\mathbb{R}\rightarrow \mathbb{R}^n$, $v(\cdot)$ is a bounded measurable function,
$V\subset \mathbb{R}^m$ is a compact convex set, $f$ is continuous and $f(x,V)$ is convex for all $x\in\mathbb{R}^n$.
In order to compute an over-approximation to the reachable set of~\eqref{cp}, we compute
solution set of a different (an approximate) differential equation and
add the uniform error bound on the difference of the two solutions.
\subsection{Single-step approximation}\label{Step}
Given an initial set of points $X_0$, define
\begin{equation}\label{rch}
R(X_0,t)=\{x(t) \mid x(\cdot)\, \textrm{is a solution of~\eqref{cp} with}\; x(0)\in X_0\}
\end{equation}
\noindent as the reachable set at time $t$.
Let $[0,T]$ be an interval of existence of (\ref{cp}). Let $0=t_0,\, t_1,\, \ldots,\, t_{n-1}, t_n=T$
be a partition of $[0,T]$, and let $h_k=t_{k+1}-t_k$.
For $x\in\mathbb{R}^n$ and $v(\cdot)\in L^\infty([t_k,t_{k+1}];\mathbb{R}^m)$,
define $\phi(x_k,v(\cdot))$ to be the point $x(t_{k+1})$ which is the value at time $t_{k+1}$ of the solution of~\eqref{cp} with $x(t_k)=x_k$.
At each time step we want to compute an over-approximation $R_{k+1}$ to the set
\begin{equation*}
\mathrm{reach}(R_k,t_{k},t_{k+1})=\{ \phi(x_k,v(\cdot)) \mid x_k\in R_k\text{ and } v(\cdot)\in L^\infty([t_k,t_{k+1}];\mathbb{R}^m) \} .
\end{equation*}
Since the space of bounded measurable functions is infinite-dimensional, we aim to approximate the set of all solutions by
restricting the disturbances to a finite-dimensional space.
Consider a set of approximating functions $W_k\subset C([t_k,t_{k+1}];\mathbb{R}^m)$ parameterized as $W_k=\{w(a_k,\cdot)\,|\,a_k\in A\subset\mathbb{R}^p\}$, such as $w(a_k,t) = a_{0k} + a_{1k} (t-t_{k+1/2}) / h_k$ where $t_{k+1/2} = t_k+h_k/2 = (t_k+t_{k+1})/2$.
We then need to find an error bound $\epsilon$ such that
\begin{equation}\label{eps}
\forall\,v_k\in L^\infty([t_k,t_{k+1}];V),\ \exists\,a_k\in A \text{ s.t. } \| \phi(x_k,v_k(\cdot)) - \phi(x_k,w(a_k,\cdot)) \| \leq \epsilon_k .
\end{equation}
Note that we do not need to find explicitly infinitely many $a_k$'s. Instead we need to choose the correct dimension ($\mathbb{R}^p$) and provide bounds on them to get desired error $\epsilon_k$ .
Setting $\tilde{\phi}(x_k,a_k)=\phi(x_k,w(a_k,\cdot))$, i.e., $\tilde{\phi}$ also denotes the solution
of $\dot x(t)=f(x_k,w(a_k,\cdot))$, with $x(t_k)=x_k$, at $t=t_{k+1}$, we obtain the over-approximation
\begin{equation*}
R_{k+1} = \{ \tilde{\phi}(x_k,a_k) + [-\epsilon_k, \epsilon_k]^n \mid x_k\in R_k \text{ and } a_k\in A\}.
\end{equation*}
\noindent Define the approximate system at time step $k$ by
\begin{equation}\label{cpa}
\dot y(t) = f(y(t), w_k(a_k,t)), \,\, y_k=y(t_k), \ t\in[t_k,t_{k+1}].
\end{equation}
\noindent We would like to choose ``approximating'' functions $w_k=w(a_k,\cdot):[t_k,t_{k+1}]\rightarrow \mathbb{R}$,
depending on $x(t_k)$ and $v(\cdot)$,
such that the solution of (\ref{cpa}) is an approximation of high order to the solution of (\ref{cp}).
The desired local error for this paper is at least of $O(h^3)$.
Then we can expect the global error (cumulative error for the time of computation, $[0,T]$) to be roughly of $O(h^2)$.
Without loss of generality, we assume that $x(t_k)=y(t_k)$ for all
$k\ge 0$. To be precise, initially, we assume $x(t_0)=y(t_0)$.
After obtaining an over-approximation $R_1$, to the solution set at
time $t_1$, we use $R_1$ as the set of initial points of both the original
system~\eqref{cp} and its approximation~\eqref{cpa}
for the next time step. Thus we have $x(t_1)=y(t_1)\in R_1$. We compute $R_2$, and consider it to be
the set of initial points for both equations at time $t_2$.
Proceeding like this, we have $x(t_k)=y(t_k)$, for all $k\ge0$.
The local error for a time-step consists of two parts. The first part is the analytical error
given by~\eqref{eps}. The second part is the numerical error which is an interval remainder of the
polynomial model~(see Definition \ref{tm}) representing the solution $\tilde{\phi}(x_k,a_k)$ of $\dot{x}(t)=f(x(t),w_k(a_k,t))$.
We represent the time-$t_k$ reachable set $R_k=\{h_k(s)\,+\, [-\varepsilon_k,\varepsilon_k]^n \, |\, s\in[-1,+1]^{p_k}\}$,
as a polynomial model whose remainder consists of both numerical and analytical error.
Here, $p_k$ is the number of parameters used in the description of $R_k$.
The inclusion $R(X_0,t_k)\subseteq R_k$ is guaranteed by this approximation scheme.
\medskip
Note that our method only
guarantees a local error of high order at the sequence of rational points $\{t_k\}$ which is {\it a priori} chosen. If one is trying to estimate the error at times $t_k<t<t_{k+1}$ for any $k$ along a \emph{particular} solution, a different formula should be used such as a logarithmic norm estimate based on Theorem~\ref{lnt}.
\subsection{Algorithm for Computing the Reachable Set}\label{Algo}
In this section we present an algorithm for computation of the solution set of~\eqref{di1},
using the single step computation presented earlier.
\begin{algorithm}\mbox{}
Let $R_k=\{ h_k(s)\pm e_k \mid s\in[-1,+1]^{p_k} \}$ be an over-approximation of the set $R(X_0,t_k)$.
To compute an over-approximation $R_{k+1}$ of $R(X_0,t_{k+1})$:
\begin{enumerate}
\item\label{flwstp} Compute the flow $\tilde{\phi}_k(x_k,a_k)$ of
\[
\dot x(t) = f(x(t),w_k(a_{k},t)),\,\,x(t_k)=x_k,
\]
for $t\in [t_k,t_{k+1}]$, $x_k \in R_k$, and $a_k\in A$.
\item\label{errstp} Compute the uniform error bound $\varepsilon_k$ for the error of approximating $\dot{x}=f(x(t),v(t))$ by $\dot{x}=f(x(t),w(t))$.
\item\label{aplystp} Compute the set $R_{k+1}$ which over-approximates $R(x_0,t_{k+1})$ as $R_{k+1} \supset \{\tilde{\phi}(x_k,a_k)\pm \epsilon_k\,|
\, x_k\in R_k,\, a_k\in A\}$.
\item\label{redstp} Reduce the number of parameters (if necessary).
\item\label{spltstp} Split the new obtained domain (if necessary).
\end{enumerate}
\end{algorithm}
Step~\ref{flwstp} of the algorithm produces an approximated flow
in the form $\tilde{\phi}_k(x_k,a_k) \approx \phi(x_k,w(a_k,\cdot))$
which is guaranteed to be valid for all $x_k\in R_k$.
In practice, we cannot represent $\tilde{\phi}$ exactly, and
instead use polynomial model approximation with guaranteed error bound $\hat{\phi}$.
In Step~\ref{errstp}, we add the uniform error bound $\varepsilon_k$ to make sure an
over-approximation is achieved. In Step~\ref{aplystp}, we compute a new approximating set by applying the
approximated flow to the initial set of points to obtain a solution set
$R_{k+1}=\{ \hat{\phi}(h(s_k)\pm e_k,a_k)\pm\varepsilon_k \}$.
Steps~\ref{redstp} and~\ref{spltstp} are crucial for the efficiency and the accuracy of the algorithm, as explained below.
It is important to notice that the number of parameters ($a_k$ initially)
grows over the time steps. At each time-step, the number of parameters doubles, unless certain reduction
of parameters is applied. The easiest way to reduce the number of parameters is to replace
the parameter dependency by a uniform error,
but this can have a negative impact on the accuracy.
Another way to reduce number of parameters is using orthogonalization,
though this is only possible for affine approximations using currently known methods.
It is also of importance to realize that if the approximating set becomes too large,
it may be hard to compute ``good'' approximations to the flow and/or the error.
In this case, we can split the set into smaller pieces, and evolve each piece separately.
This can improve the error, but is of exponential complexity in the state-space dimension.
\section{Input-Affine Systems}\label{IAS}
In this section, we restrict attention to the input-affine system
\begin{equation}\label{ca}
\dot x(t) = f(x(t)) + \sum_{i=1}^{m} g_i(x(t)) v_i(t); \quad x(t_0)=x_0.
\end{equation}
\noindent For some $r\ge 1$ which depends on the desired order, we assume that
\begin{itemize}
\item $f:\mathbb{R}^n\rightarrow \mathbb{R}^n$ is $C^r$ function,
\item each $g_i:\mathbb{R}^n\rightarrow \mathbb{R}^n$ is $C^r$ function,
\item $v_i(\cdot)$ is a measurable function such that
$v_i(t) \in [-V_i,+V_i]$ for some $V_i > 0$.
\end{itemize}
Then the equation~\eqref{cpa} becomes
\begin{equation}\label{caa}
\dot y(t) = f(y(t)) + \sum_{i=1}^{m} g_i(y(t))w_i(a_k,t); \quad y(t_k)=y_k, \ t\in[t_k,t_{k+1}].
\end{equation}
In what follows, we assume that we have a bound $B$ on the solutions of (\ref{ca}) and (\ref{caa}) for all $t\in [0,T]$.
We take constants $V_i$, $K$, $K_i$, $L$, $L_i$, $H$, $\Lambda$ such that
\begin{equation}\label{bounds}
\begin{gathered}
|v_i(\cdot)|\le V_i,\,\,\|f(z(t))\|\le K,\,\,\|g_i(z(t))\|\le K_{i},\,\, \lambda(Df(\cdot))\le \Lambda,\\[\jot]
\|Df(z(t))\| \le L,\,\,\|Dg_i(z(t))\|\le L_i,\,\, \|D^2f(z(t))\| \le H,\,\,\|D^2g_i(z(t))\| \le H_i, \\[\jot]
\end{gathered}
\end{equation}
\noindent for each $i=1,...,m$, and for
all $t\in [0,T]$, and $z(\cdot)\in B$. We also set
\[ K'={\displaystyle\sum_{i=1}^{m}} V_i\,K_i, \ \ L'={\sum_{i=1}^{m}} V_i\,L_i, \ \ H'={\sum_{i=1}^{m}} V_i\,H_i. \]
Here, $Df$ denotes the Jacobian matrix, $D^2 f$ denotes the Hessian matrix,
and $\lambda(\cdot)$ denotes the logarithmic norm of a matrix defined in Section \ref{Prel}.
We proceed to derive higher order estimates on the error by considering several different
cases. In each of the cases, $w_i(a,\cdot)$ is a real valued
finitely-parametrised function with $a\in A\subset \mathbb{R}^N$.
In general, the number of parameters $N$ depends on the number of inputs and the order of error desired.
In what follows, we write $h_k=t_{k+1}-t_k$, $t_{k+1/2}=t_k + h_k/2 = (t_k+t_{k+1})/2$, and $\hat{q}(t) = \int_{t_k}^{t} q(s)\,ds$.
\subsection{Error derivation}\label{errde}
The single-step error in the difference between $x_{k+1}$ and $y_{k+1}$
is derived as follows. Writing~\eqref{ca} and ~\eqref{caa} as integral equations, we obtain:
\begin{subequations}\label{picard}
\begin{align}
x(t_{k+1}) &= x(t_k) + \int_{t_k}^{t_{k+1}} f(x(t)) + \sum_{i=1}^{m} g_i(x(t)) v_i(t)\,dt ; \\
y(t_{k+1}) &= y(t_k) + \int_{t_k}^{t_{k+1}} f(y(t)) + \sum_{i=1}^{m} g_i(y(t)) w_i(t)\,dt .
\end{align}
\end{subequations}
Since we can take $x(t_k) = y(t_k)$ as explained in Section~\ref{Di}, we obtain
\begin{subequations}\label{first}
\begin{align}
x(t_{k+1})-y(t_{k+1})
&= \int_{t_k}^{t_{k+1}} f(x(t))-f(y(t))\,dt \label{firstF}\\
&\qquad\qquad + \sum_{i=1}^{m} \int_{t_k}^{t_{k+1}} g_i(x(t))v_i(t)- g_i(y(t))w_i(t) dt.\label{firstG}
\end{align}
\end{subequations}
\vspace{\baselineskip}
Integrating by parts the term~\eqref{firstF}, we obtain
\begin{align*}
\eqref{firstF}\ & = \Bigl[ (t-t_{k+1/2}) \bigl(f(x(t)) - f(y(t))\bigr) \Bigr]_{t_k}^{t_{k+1}} \\
&\hspace{3em} - \int_{t_k}^{t_{k+1}} (t-t_{k+1/2})\frac{d}{dt}\bigl( f(x(t))- f(y(t))\bigr)dt \\[\jot]
& = (h_k/2) \bigl(f(x(t_{k+1}))-f(y(t_{k+1})) \bigr) \\
&\hspace{3em} - \int_{t_k}^{t_{k+1}} (t-t_{k+1/2}) \bigl( Df(x(t))\dot x(t) - Df(y(t))\dot y(t)\bigr)dt.
\end{align*}
\noindent There are two ways in which we deal with term~\eqref{firstG}. First we rewrite the term inside the integral as
\begin{align*}
g_i(x(t))v_i(t)- g_i(y(t))w_i(t) = (g_i(x(t))-g_i(y(t)))\,w_i(t) + g_i(x(t))\,(v_i(t)-w_i(t)),
\end{align*}
\noindent and then integrate by parts the second term to obtain
\begin{subequations}\label{secondG1}
\begin{align}
\eqref{firstG} &= \sum_{i=1}^{m} \int_{t_k}^{t_{k+1}} (g_i(x(t))-g_i(y(t)))\,w_i(t)\,dt \notag \\
&+ \sum_{i=1}^{m} \Bigl[g_i(x(t))(\hat{v}_i(t)- \hat{w}_i(t))\Bigr]_{t_k}^{t_{k+1}}
-\sum_{i=1}^{m}\int_{t_k}^{t_{k+1}} \frac{d}{dt}\Bigl( g_i(x(t))\Bigr)\,(\hat{v}_i(t)-\hat{w}_i(t))\, dt\notag \\
&=\sum_{i=1}^{m} \int_{t_k}^{t_{k+1}} (g_i(x(t))-g_i(y(t)))\,w_i(t)\,dt \label{secondG1a} \\
&\qquad\qquad+ \sum_{i=1}^{m} g_i(x(t_{k+1}))(\hat{v}_i(t_{k+1})- \hat{w}_i(t_{k+1}))\label{secondG1b}\\
&\qquad\qquad-\sum_{i=1}^{m}\int_{t_k}^{t_{k+1}} Dg_i(x(t))\,\dot x(t)\,(\hat{v}_i(t)-\hat{w}_i(t))\, dt\label{secondG1c}
\end{align}
\end{subequations}
\noindent The second derivation is obtained just by integrating by parts,
\begin{subequations}\label{secondG}
\begin{align}
\eqref{firstG}\ &= \sum_{i=1}^{m} \Bigl[ g_i(x(t))\hat{v}_i(t) - g_i(y(t))\hat{w}_i(t) \Bigr]_{t_k}^{t_{k+1}} \notag \\
&\qquad\qquad - \sum_{i=1}^{m} \int_{t_k}^{t_{k+1}}\frac{d}{dt}\Bigl(g_i(x(t))\Bigr) \hat{v}_i(t) - \frac{d}{dt}\Bigl(g_i(y(t))\Bigr) \hat{w}_i(t)\,\, dt\notag\\
&= \sum_{i=1}^{m} g_i(x(t_{k+1}))\hat{v}_i(t_{k+1}) - g_i(y(t_{k+1}))\hat{w}_i(t_{k+1})\label{secondGa} \\
&\qquad\qquad - \sum_{i=1}^{m} \int_{t_k}^{t_{k+1}} Dg_i(x(t)) \hat{v}_i(t) \dot{x}(t)- Dg_i(y(t))\hat{w}_i(t)\dot{y}(t) \,\, dt\label{secondGb}
\end{align}
\end{subequations}
\noindent Equations~\eqref{firstF} and~\eqref{secondG1} can be used to derive second-order local error estimates.
\vspace{\baselineskip}
By applying the mean value theorem (Theorem~\ref{MVT}) we obtain
\begin{equation*}
f(x(t_{k+1}))-f(y(t_{k+1})) = \int_{0}^{1} Df(z(s))ds\; \bigl(x(t_{k+1})-y(t_{k+1})\bigr)
\end{equation*}
\noindent Hence
\begin{subequations}\label{secondF}
\begin{align}
\eqref{firstF}\ &= (h_k/2) \int_{0}^{1} Df(z(s))ds\; \bigl(x(t_{k+1})-y(t_{k+1})\bigr) \label{secondFa}\\
&\hspace{5em} - \int_{t_k}^{t_{k+1}} (t-t_{k+1/2}) \bigl( Df(x(t))\dot x(t) - Df(y(t))\dot y(t)\bigr)dt. \label{secondFb}
\end{align}
\end{subequations}
Separate the second part of the integrand in~\eqref{secondFb} as
\begin{subequations}
\begin{align}
Df(x(t))\, \dot x(t) - Df(y(t))\,\dot y(t) &= Df(x(t))\,\bigl(\dot x(t) -\dot y(t)\bigr) \label{sepFa}\\[\jot]
&\qquad\qquad +\bigl(Df(x(t)) - Df(y(t))\bigr)\,\dot y(t) \label{sepFb}
\end{align}
\end{subequations}
The first term of the right-hand-side can be expanded using
\begin{align*}
\dot x(t) -\dot y(t)
&= f(x(t)) - f(y(t)) + \sum_{i=1}^{m} \bigl(g_i(x(t))-g_i(y(t))\bigr)w_i(t) \\
&\hspace{12em} + \sum_{i=1}^{m} g_i(x(t))\bigl(v_i(t)-w_i(t)\bigr).
\end{align*}
\noindent Hence we obtain
\begin{subequations}\label{thirdF}
\begin{align}
\eqref{firstF} & = (h_k/2) \int_{0}^{1} Df(z(s))ds\, (x(t_{k+1})-y(t_{k+1}))\label{thirdFa}\\
&\qquad - \int_{t_k}^{t_{k+1}} (t-t_{k+1/2}) \,\, Df(x(t))\,\,(f(x(t)) - f(y(t))) \, dt\label{thirdFb}\\
&\qquad - \sum_{i=1}^{m} \int_{t_k}^{t_{k+1}} (t-t_{k+1/2}) \,\, Df(x(t))\,\,(g_i(x(t))-g_i(y(t)))w_i(t) \,dt\label{thirdFc}\\
&\qquad - \sum_{i=1}^{m} \int_{t_k}^{t_{k+1}} (t-t_{k+1/2}) \,\, Df(x(t))\,\,g_i(x(t))\,\,(v_i(t)-w_i(t)) \, dt,\label{thirdFd} \\
&\qquad - \int_{t_k}^{t_{k+1}} (t-t_{k+1/2}) \,\, (Df(x(t)) - Df(y(t)))\,\,\dot y(t) dt\label{thirdFe}
\end{align}
\end{subequations}
where~\eqref{thirdFa} is~\eqref{secondFa},~(\ref{thirdF}b-d) come from~\eqref{sepFa}, and~\eqref{thirdFe} comes from~\eqref{sepFb}.
Note that for any $C^1$-function $h(x)$ we can write
\begin{equation*}
|h(x(t)) - h(y(t)) | \leq
\| Dh(z(t)) \| \cdot | x(t) - y(t) |
\end{equation*}
where $z(t)\in \overline{\mathrm{conv}}\{x(t),y(t)\}$. This will allow us to obtain third-order bounds for terms~(\ref{thirdF}b,c,e).
In order to obtain a third-order estimate for term~\eqref{thirdFd}, a further integration by parts is needed. We obtain:
\begin{subequations}\label{thirdFp}
\begin{align}\addtocounter{equation}{3}
\eqref{thirdFd} &= - \sum_{i=1}^{m} \Bigl[ Df(x(t)) \, g_i(x(t)) \, {\mbox{\small$\displaystyle\int_{t_k}^{t}$}} (s-t_{k+1/2}) (v_i(s)-w_i(s)) ds \Bigr]_{t_k}^{t_{k+1}} \notag \\
&\qquad\begin{aligned} &\qquad + \int_{t_k}^{t_{k+1}} \bigl(D^2f(x(t))\,g_i(x(t))+ Df(x(t))Dg_i(x(t))\bigr)\,\dot x(t) \label{thirdFdp}\\
&\hspace{14em} \int_{t_k}^{t} (s-t_{k+1/2})(v_i(s)-w_i(s))ds\ dt .
\end{aligned}
\end{align}
\end{subequations}
\noindent Using similar type of derivation as for the derivation of~\eqref{thirdF}, again using the mean value theorem and integration by parts, we obtain
\begin{subequations}\label{thirdG}
\begin{align}
\eqref{secondGa}+\eqref{secondGb} &= \sum_{i=1}^{m} \int_{0}^{1} Dg_i(z(s))ds\;\bigl(x(t_{k+1})-y(t_{k+1})\bigr)\hat{w}_i(t_{k+1})\label{thirdGa}\\
&\qquad + \sum_{i=1}^{m} g_i(x_{k+1})\bigl(\hat{v}_i(t_{k+1}) - \hat{w}_i(t_{k+1})\bigr)\label{thirdGb}\\
&\qquad - \sum_{i=1}^{m} \int_{t_k}^{t_{k+1}} \bigl(Dg_i(x(t)) - Dg_i(y(t))\bigr)\,\dot y(t)\, \hat{w}_i(t) dt\label{thirdGc}\\
&\qquad - \sum_{i=1}^{m} \int_{t_k}^{t_{k+1}} Dg_i(x(t))\,\bigl(f(x(t)) - f(y(t))\bigr)\, \hat{w}_i(t)\, dt\label{thirdGd}\\
&\qquad - \sum_{i=1}^{m} \int_{t_k}^{t_{k+1}} Dg_i(x(t))\,f(x(t))\,\bigl(\hat{v}_i(t)-\hat{w}_i(t)\bigr)\, dt\label{thirdGe}\\
&\qquad - \sum_{i=1}^{m}\sum_{j=1}^{m} \int_{t_k}^{t_{k+1}} Dg_i(x(t))\,\bigl(g_j(x(t))-g_j(y(t))\bigr)\,w_j(t)\, \hat{w}_i(t)\,dt\label{thirdGf}\\
&\qquad - \sum_{i=1}^{m}\sum_{j=1}^{m} \int_{t_k}^{t_{k+1}} Dg_i(x(t))\,g_j(x(t))\,\bigl(v_j(t)\hat{v}_i(t)-w_j(t)\hat{w}_i(t)\bigr) \, dt. \label{thirdGg}
\end{align}
\end{subequations}
The term~\eqref{thirdGe} can be further integrated by parts to obtain
\begin{subequations}\label{thirdGp}
\begin{align}\addtocounter{equation}{4}
\eqref{thirdGe}& = - \sum_{i=1}^{m} \Bigl[ Dg_i(x(t)) \, f(x(t)) \, {\mbox{\small$\displaystyle\int_{t_k}^{t}$}} (\hat{v}_i(s)-\hat{w}_i(s))ds \Bigr]_{t_k}^{t_{k+1}} \notag\\
& \qquad + \sum_{i=1}^{m} \int_{t_k}^{t_{k+1}} \bigl( D^2g_i(x(t))\,f(x(t)) + Dg_i(x(t))\,Df(x(t)) \bigr) \dot{x}(t) \,(\hat{\hat{v}}_i(t)-\hat{\hat{w}}_i(t))\, dt \label{thirdGep}
\end{align}
and the term~\eqref{thirdGg} to obtain
\begin{align}\addtocounter{equation}{1}
\eqref{thirdGg}&= - \sum_{i=1}^{m}\sum_{j=1}^{m} \Bigl[ Dg_i(x(t))\,g_j(x(t))\,{\mbox{\small$\displaystyle\int_{t_k}^{t}$}} \bigl(v_j(s)\hat{v}_i(s)-w_j(s)\hat{w}_i(s)\bigr) ds \Bigr] \notag\\
&\qquad\begin{aligned} &\qquad + \sum_{i=1}^{m}\sum_{j=1}^{m} \int_{t_k}^{t_{k+1}} \bigl( D^2g_i(x(t))\,g_j(x(t))+Dg_i(x(t))\,Dg_j(x(t)) \bigr)\,\dot{x}(t) \\[-\jot]
&\hspace{16em} \,{\mbox{\small$\displaystyle\int_{t_k}^{t}$}} \bigl(v_j(s)\hat{v}_i(s)-w_j(s)\hat{w}_i(s)\bigr)ds \ dt.
\end{aligned}
\label{thirdGgp}
\end{align}
\end{subequations}
\noindent Equations~(\ref{thirdF}-\ref{thirdGp}) can be used to derive third-order local error estimates.
\subsection{Local error estimates}\label{formulas}
We proceed to give formulas for the local error having different assumptions on functions $f(\cdot)$, $g_i(\cdot)$
and $w_i(\cdot)$.
We present necessary and sufficient conditions
for obtaining local errors of $O(h)$, $O(h^2)$, $O(h^3)$, and give a methodology
to obtaining even higher-order errors. In addition, we give formulas for the error calculation in several cases.
\subsubsection{Local error of $O(h)$}
\begin{theorem}\label{case1}
For any $k\ge 0$, and all $i=1,...,m$, if
\begin{itemize}
\item $f(\cdot)$ is a Lipschitz continuous vector function,
\item $g_i(\cdot)$ are continuous vector functions, and
\item $w_i(t)=0$ on $[t_k,t_{k+1}]$,
\end{itemize}
\noindent then the local error is of $O(h)$. Moreover, a formula
for the error bound is:
\begin{equation}\label{orderh}
\bigl| x(t_{k+1}) - y(t_{k+1})\bigr| \le h_k\,K'\, \frac{e^{\Lambda h_k} -1}{\Lambda\,h_k}.
\end{equation}
Alternatively, we can use
\begin{equation}\label{orderhalt}
\bigl| x(t_{k+1}) - y(t_{k+1})\bigr| \le h_k\,\biggl(2K + K'\biggr) .
\end{equation}
\end{theorem}
\begin{proof} Since $w_i(t)=0$, we have $\dot{y}(t)=f(y(t))$. Using the bounds given in~\eqref{bounds}, we can take $l(t)=\Lambda$ in Theorem~\ref{lnt}. Further, we have
\begin{align*}
\biggl\|\dot y(t) - \Bigl(f(y(t)) + \sum_{i=1}^m g_i(y(t))v_i(t)\Bigr)\biggr\| = \biggl\|\sum_{i=1}^m g_i(y(t))v_i(t)\biggr\| \le \sum_{i=1}^{m} K_i\,V_i=K'
\end{align*}
%
\noindent so we can take $\delta(t)=\sum_{i=1}^{m} K_i\,V_i$. Hence the formula~\eqref{orderh} is obtained directly from Theorem~\ref{lnt}.
Note that $(e^{\Lambda h_k} - 1)/(\Lambda\,h_k) = 1 + \Lambda h_k/2 + \cdots$ is $O(1)$,
so the local error is of $O(h)$. Equation~\eqref{orderhalt} can be obtained by noting that $\sup_{t\in [t_k,t_{k+1}]} ||f(x(t))-f(y(t))||\le 2K$.
\end{proof}
\subsubsection{Local error of $O(h^2)$}
\begin{theorem}\label{case2}
For any $k\ge 0$, and all $i=1,...,m$, if
\begin{itemize}
\item $f(\cdot)$, $g_i(\cdot)$ are $C^1$ vector functions, and
\item $w_i(\cdot)$ are bounded measurable functions defined on $[t_k,t_{k+1}]$ which satisfy
\begin{equation}\label{se1}
\int_{t_k}^{t_{k+1}} v_i(t) - w_i(t) \, dt = 0,
\end{equation}
\end{itemize}
\noindent then an error of $O(h^2)$ is obtained.
\end{theorem}
\begin{proof} To show that the error is of $O(h^2)$, we use equations~(\ref{first},\ref{secondG1}).
The equation~\eqref{firstF} is in the
desired form, i.e., of $O(h^2)$, since we can write
\[
\biggl|\int_{t_k}^{t_{k+1}} f(x(t)) - f(y(t))\,dt\biggr|
\le h \, L\; {\textstyle\sup_{t\in [t_k,t_{k+1}]}}\|x(t)-y(t)\|,
\]
\noindent and $\sup_{t\in [t_k,t_{k+1}]}\|x(t)-y(t)\|$ is of $O(h)$
by Theorem \ref{lnt}. Similarly, equations~\eqref{secondG1a} and~\eqref{secondG1c} are of $O(h^2)$.
Note that the equation~\eqref{secondG1b} is zero due to
(\ref{se1}). The theorem is proved.
\end{proof}
In order to be able to compute the errors, we need the bounds on the functions $w_i(\cdot)$. In particular,
we can restrict $w_i(\cdot)$ to belong to certain class of functions, such as polynomial or step functions.
\medskip
\begin{theorem}\label{case2a}
For any $k\ge 0$, and all $i=1,...,m$, if
\begin{itemize}
\item $f(\cdot)$, $g_i(\cdot)$ are $C^1$ vector functions, and
\item $w_i(t)$ are real valued, constant functions defined on $[t_k,t_{k+1}]$ by
$ w_i=\frac{1}{h_k} \int_{t_k}^{t_{k+1}} v_i(t)dt , $
\end{itemize}
\noindent then a formula for calculation of the local error is given by
\begin{align}\label{constantapproximationsecondordererror}
\|x(t_{k+1}) - y(t_{k+1})\| \leq h_k^2\,\left(\left(K+K'\right)L'/3+2\,K'\,\left(L + L'\right)\,\frac{e^{\Lambda\, h_k}-1}{\Lambda\,h_k}\right).
\end{align}
\end{theorem}
\begin{proof}
To derive~\eqref{constantapproximationsecondordererror}, we obtain $\|x(t_{k+1}) - y(t_{k+1})\|$ from equations~\eqref{firstF} and~\eqref{secondG1}.
Using the bounds given in~\eqref{bounds},
it is immediate that $||\dot{x}||\leq K + \sum_{i=1}^{m} V_i\,K_i$,
and straightforward to show that $|w_i(t)|\leq V_i$ and $|\hat{v}_i(t)-\hat{w}_i(t)| \leq 2V_i\,h_k$ for $t\in[t_k,t_{k+1}]$.
However, we can get a slightly better bound $|\hat{v}_i(t)-\hat{w}_i(t)|\le V_i\, h_k/2$ by considering the following:
Without loss of generality, assume $t\in[0,h]$, and let
\begin{align*}
a_i(t)=\frac{1}{t}\,\int_{0}^{t} v_i(s)\,ds,\ \ \
b_i(t)=\frac{1}{h-t}\,\int_{t}^{h} v_i(s)\,ds
\end{align*}
\noindent and define \[w_i(t)=(t\,a_i(t)\, +\,(h-t)\,b_i(t))/h.\]
Then, $w_i=w_i(t)$ is constant for all $t\in [0,h]$.
Notice that $\hat{v}_i(t)=ta_i(t)$ and $\hat{w}_i(t)=(t/h)(ta_i(t)+(h-t)b_i(t))$.
Hence, we have
\begin{align*}
\hat{v}_i(t)-\hat{w}_i(t) &= t(h-t)(a_i(t)-b_i(t))/h,\\
|\hat{v}_i(t)-\hat{w}_i(t)| &= t(h-t)|a_i(t)-b_i(t)|/h \le V_i\,h/2.
\end{align*}
Additionally, we can prove that $\int_{t_k}^{t_{k+1}}|\hat{v}_i(t)-\hat{w}_i(t)|\,dt \leq V_i\,h_k^2 /3$.
Take $z(t)$ to satisfy the differential equation $\dot{z}(t)=f(z(t))$.
From Theorem \ref{lnt}, we have
\[
\|x(t)-z(t)\|,\|y(t)-z(t)\| \le h_k\,\Bigl(\sum_{i=1}^{m} K_i\,V_i\Bigr)\,\frac{e^{\Lambda h_k} -1}{\Lambda\,h_k}
\]
and hence
\[
\|x(t)-y(t)\| \le 2\,h_k\,\Bigl(\sum_{i=1}^{m} K_i\,V_i\Bigr)\,\frac{e^{\Lambda h_k} -1}{\Lambda\,h_k}
\]
\noindent for $t\in [t_k,t_{k+1}]$. Taking the norm of the equations (\ref{firstF},\ref{secondG1a},\ref{secondG1c})
we obtain
\begin{multline*}
\|x(t_{k+1})-y(t_{k+1})\| \leq
\int_{t_k}^{t_{k+1}} \Bigl( L+\sum_{i=1}^{m} V_iL_i \Bigr)\,\biggl( 2\,h_k\,\Bigl(\sum_{i=1}^{m} K_i\,V_i\Bigr)\,\frac{e^{\Lambda h_k} -1}{\Lambda\,h_k}\biggr) \\
\qquad\qquad+ \sum_{i=1}^{m} L_i \Bigl( K+\sum_{j=1}^{m} V_jK_j\Bigr) \bigl| \hat{v}_i(t)-\hat{w}_i(t) \bigr|\,dt \\
\leq {h_k}^2 \biggl( \Bigl( L+\sum_{i=1}^{m} V_iL_i \Bigr)\,\biggl( 2\,\Bigl(\sum_{i=1}^{m} K_i\,V_i\Bigr)\,
\frac{e^{\Lambda h_k} -1}{\Lambda\,h_k}\biggr)
+ \frac{1}{3}\Bigl(\sum_{i=1}^{m} V_iL_i\Bigr) \Bigl( K+\sum_{j=1}^{m} V_jK_j\Bigr) \biggr) .
\end{multline*}
Using $K'$ and $L'$, we get the desired formula (\ref{constantapproximationsecondordererror}).
\end{proof}
\begin{remark}
Note that as $\Lambda\rightarrow 0$, then $\frac{e^{\Lambda\, h}-1}{\Lambda\,h}\rightarrow 1$.
This is also consistent with Theorem \ref{lnt}. In fact, if $\Lambda=0$, we get
\[
\|x(t)-y(t)\| \le 2\,h_k\,\Bigl(\sum_{i=1}^{m} K_i\,V_i\Bigr)
\]
\noindent and therefore,
\begin{align}
\|x(t_{k+1}) - y(t_{k+1})\| \leq h_k^2\,\bigl(\left(K+K'\right)L'/3+2\,K'\,\left(L + L'\right)\bigr),
\end{align}
\noindent which is still of $O(h^2)$. However, we will not give explicit formulas for the error when $\Lambda=0$.
\end{remark}
\begin{theorem}\label{case2a1}
If all assumptions of Theorem \ref{case2a} are satisfied, and in addition $f(\cdot)$ is $C^2$,
then a formula for calculation of the local error can be given by
\begin{align*}
& \biggl(1-(h_kL/2)\biggr)\|x(t_{k+1}) - y(t_{k+1})\| \le (h_k^2/3)\, \left(3\,K'\,L'\,\,\frac{e^{\Lambda h_k} -1}{\Lambda h_k} + L'\,(K+K')\right)\\
&\qquad\qquad + (h_k^3/4)\, K'\, \biggl(L\,L'+ L^2 + H(K+K')\biggr)\, \frac{e^{\Lambda h_k} -1}{\Lambda h_k}\\
&\qquad\qquad + (11\,h_k^3/24)\,(H\,K'+L\,L')(K+K').
\end{align*}
\end{theorem}
\begin{proof}
The same bounds on functions apply as in Theorem \ref{case2a}.
The formula for $\|x(t_{k+1}) - y(t_{k+1})\|$ is then obtained by taking norms of terms in equations~\eqref{thirdF} and~\eqref{secondG1}.
\end{proof}
\begin{remark}
The computation of the error bound is complicated by the fact that $|v_i(t)-w_i(t)|$ is not uniformly small.
This means that the terms $g(x)(v_i-w_i)$ must be integrated over a complete time step in order to be able to use the fact that
$\int_{t_k}^{t_{k+1}} v_i(t)\,dt = \int_{t_k}^{t_{k+1}} w_i(t)\,dt$, and this must be done \emph{without} first taking norms inside the integral.
As a result, we cannot directly apply results on the logarithmic norm.
Instead, we ``bootstrap'' the procedure by applying a first-order estimate for $\|x(t)-y(t)\|$ valid for any $t\in[t_k,t_{k+1}]$.
\end{remark}
\subsubsection{Local error $O(h^2)+O(h^3)$}
\label{sec:twoparametererror}
We can attempt to improve the error bounds by allowing $w_i(t)$ to have two independent parameters.
In the general case, we shall see that this gives rise to a local error estimate containing terms of $O(h^2)$ and $O(h^3)$, rather than the anticipated pure $O(h^3)$ error.
We require $w_i(t)$ to satisfy the equations
\begin{equation}\label{se2}
\int_{t_k}^{t_{k+1}} v_i(t) - w_i(t) \, dt = 0; \qquad
\int_{t_k}^{t_{k+1}} (t-t_{k+1/2})\,\,(v_i(t) - w_i(t)) \, dt = 0.
\end{equation}
If the $w_i$ are taken to be affine functions, $w_i(t)=a_{i,0}+a_{i,1}(t-t_{k+1/2})/h_k$, then we have
\begin{equation}
\label{eq:polynomialparameterformulae}
a_{i,0} = \frac{1}{h_k}\, \int_{t_k}^{t_{k+1}} v_i(t)dt; \qquad
a_{i,1} = \frac{12}{h_k^2} \, \int_{t_k}^{t_{k+1}} v_i(t)\,(t-t_{k+1/2}) \, dt.
\end{equation}
\noindent It is easy to see that
\begin{equation} \label{eq:polynomialparameterbounds}
|a_{i,0}|\le V_i, \ |a_{i,1}|\le 3\,V_i, \ |w_i(t)| \le 5V_i/2,\ \text{and}\ |\dot w(t)|\le 3V_i/2h_k
\end{equation}
\noindent and it can further be shown that
\begin{equation} \label{eq:polynomialquadraticparameterbounds}
|a_{i,1}|\le 3V_i(1-(a_{i,0}/V_i)^2) .
\end{equation}
An alternative is to use step functions for $w_i$, such as
\begin{displaymath}
w_i(t) = \left\{
\begin{array}{rl}
a_{i,0} & \text{if } t_k\le t < t_{k+1/2}\\
a_{i,1} & \text{if } t_{k+1/2} \le t \le t_{k+1}.
\end{array}
\right.
\end{displaymath}
\noindent Then
\begin{align*}
a_{i,0} &= \frac{1}{h_k}\, \int_{t_k}^{t_{k+1}} v_i(t)\,dt - \frac{4}{h_k^2} \, \int_{t_k}^{t_{k+1}} v_i(t)(t-t_{k+1/2})\,dt\\[3\jot]
a_{i,1} &= \frac{1}{h_k}\, \int_{t_k}^{t_{k+1}} v_i(t)\,dt + \frac{4}{h_k^2} \, \int_{t_k}^{t_{k+1}} v_i(t)(t-t_{k+1/2}) \, dt.
\end{align*}
\noindent Hence
\begin{equation}
|a_{i,0}|\le 2\, V_i, \ |a_{i,1}|\le 2\,V_i, \ \text{and}\ |w_i(t)| \le 2\,V_i.
\end{equation}
\medskip
\begin{theorem}\label{case2b}
For any $k\ge 0$, and all $i=1,...,m$, if
\begin{itemize}
\item $f(\cdot)$ is $C^2$ vector function,
\item $g_i(\cdot)$ are non-constant $C^2$ functions, and
\item the $w_i$ satisfy~\eqref{se2},
\end{itemize}
\noindent then an error of $O(h^2)$ is obtained.
Moreover, if the $w_i$ are affine functions, $w_i(t)=a_{i,0}+a_{i,1}(t-t_{k+1/2})/h_k$, then a formula for calculation of the error is given by
\begin{align*}
&\left(1-L(h_k/2) - h_k L'\right)\|x(t_{k+1}) - y(t_{k+1})\| \le (h_k^2/4) L'\,\left(11 K + (69/2) K' \right) \\
&\qquad\qquad +(7 h_k^3/8)\,K'\,\left( (4H'+H)\, (K +(5/2)K') + L^2 + \left((9/2) L + 5L' \right)L'\right)
\frac{e^{\Lambda h_k} -1}{\Lambda h_k}\\
&\qquad\qquad+(7h_k^3/48) \left( H\,K' + L\,L' \right) \left( K + K' \right).
\end{align*}
\end{theorem}
\begin{proof}
With the assumptions of the theorem, we can improve the terms (\ref{thirdFd}) and (\ref{thirdGe})
such that they become (\ref{thirdFdp}) and~\eqref{thirdGep}, which are of $O(h^3)$.
In addition to the bounds obtained in~\eqref{eq:polynomialparameterbounds}, we use
\begin{align*}
\|\dot x(t)\| & \le K + \sum_{i=1}^{m} K_i\,V_i\,=\,K+K'\\
\|\dot y(t)\| & \le K + \frac{5}{2} \sum_{i=1}^{m} K_i\,V_i=K+(5/2)K'\\
\|x(t)-y(t)\| & \le \frac{7h_k}{2} \left(\sum_{i=1}^{m} K_i\,V_i\right)\frac{e^{\Lambda h_k} -1}{\Lambda\,h_k}=\frac{7h_k}{2}\, K'\,\frac{e^{\Lambda h_k} -1}{\Lambda\,h_k}.
\end{align*}
\noindent The formula for the error, $\|x(t_{k+1}) - y(t_{k+1})\| $ with terms (\ref{thirdFdp}) and (\ref{thirdGep})
is then easily obtained. The theorem is proved.
\end{proof}
\medskip
We now show that with the assumptions of the theorem we cannot in general obtain an error of $O(h^3)$.
Specifically, we assume that $w_i(t)$ are two-parameter polynomial or step functions satisfying
\[ \int_{t_k}^{t_{k+1}} v_i(t)-w_i(t)\,dt = \int_{t_k}^{t_{k+1}} (t-t_{k+1/2})\,(v_i(t)-w_i(t))\,dt = 0 .\]
The following counterexample gives a system for which only $O(h^2)$ local error is possible.
\begin{example}
\noindent Consider the following input-affine system which satisfies assumptions in Theorem~\ref{case2b}:
\begin{equation*}
\dot{x}_1 = x_2 + v_1 + x_1 v_2; \quad \dot{x}_2 = x_1 + v_2; \quad x(t_k)=x_k.
\end{equation*}
Take inputs
\begin{equation*}
v_1(t)=\sin\left(\frac{2\pi}{h_k}(t-t_k)\right), \qquad
v_2(t)=\cos\left(\frac{2\pi}{h_k}(t-t_k)\right).
\end{equation*}
\noindent Using~(\ref{se2}), we get $w_1(t)=-(6/\pi \, h_k)(t-t_{k+1/2})$, $w_2(t)=0$.
Therefore, an approximation equation looks like
\begin{equation*}
\dot{y}_1 = y_2 + w_1; \quad \dot{y}_2 = y_1
\end{equation*}
As shown in the previous section, the only term which might not have order
$h_k^3$ is the term in~\eqref{thirdGg}
which is reduced to
\[
\sum_{i=1}^2 \int_{t_k}^{t_{k+1}} Dg_2(x(t))g_i(x(t))\,v_i(t) \hat{v}_2(t) dt,
\]
\noindent since $Dg_1=0$. When $i=2$, we have $\frac{1}{2} \frac{d}{dt}(\hat{v}_i^2(t))= v_i(t)\hat{v}_i(t)$,
and hence we can integrate by parts once more to get the $O(h^3)$. Then we are left with
\begin{align*}
\int_{t_k}^{t_{k+1}} Dg_2(x(t))g_1(x(t))\,v_1(t) \hat{v}_2(t) dt = -\frac{h_k^2}{4\pi}\,\, [1\,\,\, 0]^T ,
\end{align*}
\noindent a term of $O(h^2)$.
\end{example}
\subsubsection{Local error of $O(h^3)$}
\label{sec:twoparameteradditiveinputerror}
We showed that for a general input-affine system, a local error of order
$O(h^3)$ cannot be obtained using affine approximate inputs $w(a,t)$. However, if in addition, we assume that $g_i(\cdot)$
are constant functions or we have a single input then we can obtain a local error of $O(h^3)$.
If $g_i(\cdot)$ are constant functions, then the error calculation is equivalent to the error calculation
of an even simpler case, so called additive noise case. The equation is then given by
\begin{equation}\label{an}
\dot x(t) = f(x(t)) + v(t).
\end{equation}
\noindent Here, $v(t)=(v_1(t),...,v_n(t))$ is vector-valued.
\begin{corollary}\label{case3a}
For any $k\ge 0$,
\begin{itemize}
\item if the system has additive noise,
\item $f(\cdot)$ is a $C^2$ function, and
\item $w_i(t)$ are real valued functions defined on $[t_k,t_{k+1}]$ which satisfy equations~\eqref{se2},
\end{itemize}
\noindent then an error of $O(h^3)$ is obtained. Moreover, for $w_i(t)=a_{i,0}+a_{i,1}(t-t_{k+1/2})/h_k$, the formula for the local error is given by:
\begin{equation}\label{ine}
\begin{aligned}
\bigl( 1-(h_k/2) L \bigr)\|x(t_{k+1})-y(t_{k+1})\| &\le \frac{7}{48}\, h_k^3\,K'\,H\,(K+K')\\
&\,\,+ \frac{7}{8}\,h_k^3\,K'\,\Bigl(L^2\, +\, H\,(K+5K'/2)\Bigr)\frac{e^{\Lambda h_k}-1}{\Lambda\,h_k}.
\end{aligned}
\end{equation}
\end{corollary}
\noindent The formula for the error in additive noise case is simplified
because $L'=H'=0$. If we write $||v(t)||= K'$, then the result follows directly from Theorem~\ref{case2b}.
\begin{corollary}\label{case3b}
For any $k\ge 0$, if
\begin{itemize}
\item the input-affine system has single input, i.e., $m=1$ in~\eqref{ca}
\item $f(\cdot)$ and $g(\cdot)$ are $C^2$ functions, and
\item $w(t)$ is a real valued function defined on $[t_k,t_{k+1}]$ which satisfies equations~\eqref{se2},
\end{itemize}
\noindent then an error of $O(h^3)$ is obtained. Moreover, for $w(t)=a_{0}+a_{1}(t-t_{k+1/2})$, the formula for
the local error is given by
\begin{align*}
&\left( 1-(h_k/2) L - h_kL'\right)\|x(t_{k+1})-y(t_{k+1})\| \le \\
&\frac{7\,h_k^3}{8}\,K' \left((H\,+\,10\,H')(K+(5/2)K') + L^2\, +\,(25/2)\,L\,L'\,+\,25\,(L')^2\right)\frac{e^{\Lambda h_k}-1}{\Lambda\,h_k}\\
&+ \frac{h_k^3}{48}\,(K+K')\,\left((7/6)(H\,K'+L\,L') + 28\,(H'\,K+L\,L') + 29\,(H'\,K'+(L')^2) \right).
\end{align*}
\end{corollary}
\begin{proof}
The result follows since the only term which is not $O(h^3)$ in~(\ref{thirdF},\ref{thirdG}) is~\eqref{thirdGg}.
In the one-input case, this simplifies to
\[ \int_{t_k}^{t_{k+1}} Dg(x(t))\,g(x(t))\,\bigl(\hat{v}(t)\,v(t)-\hat{w}(t)\,w(t)\bigr)\,dt .\]
However, we can integrate by parts to obtain
\begin{align*}
\eqref{thirdGg} &= \Bigl[ Dg(x(t))\,g(x(t))\,\bigl(\hat{v}(t)^2-\hat{w}(t)^2\bigr) \Bigr]_{t_k}^{t_{k+1}} \\
&\qquad\qquad - \int_{t_k}^{t_{k+1}} D\bigl(Dg(x(t))\,g(x(t))\bigr)\,\dot{x}(t)\,\bigl(\hat{v}(t)^2-\hat{w}(t)^2\bigr)\,dt .
\end{align*}
The first term vanishes since $\hat{v}(t_{k+1})=\hat{w}(t_{k+1})$, and the second is $O(h^3)$ since $\hat{v}(t)$ and $\hat{w}(t)$ are $O(h)$.
Taking all the bounds as in Theorem \ref{case2b}, the formula is easily obtained.
\end{proof}
Observing the error given by equations~\eqref{thirdF} and~\eqref{thirdG}, we see that if in addition to
satisfying equations given in~\eqref{se2}, the functions $w_i(\cdot)$ also satisfy
\begin{equation}\label{a3}
\int_{t_k}^{t_{k+1}} v_i(t)\hat{v}_j(t) - w_i(t)\hat{w}_j(t)\,\,dt\, =\, 0,
\end{equation}
\noindent then we could get an error of $O(h^3)$. The question remains as to whether we can find
functions $w_i(\cdot)$ that satisfy the conditions~(\ref{se2},\ref{a3}).
Since the functions $w_i(\cdot)$ cannot be computed independently any more,
the number of parameters of each $w_i(\cdot)$ will depend on the number of inputs.
\begin{theorem}
For any $k\ge 0$, if
\begin{itemize}
\item $f(\cdot)$, $g_i(\cdot)$ are $C^2$ real vector functions, and
\item $w_i(a_{i,0},...,a_{i,p\!-\!1},t)$ are real valued, defined on $[t_k,t_{k+1}]$, and satisfy
\begin{equation}\label{se3}
\begin{gathered}
\int_{t_k}^{t_{k+1}} v_i(t) - w_i(t) \, dt = 0\\
\int_{t_k}^{t_{k+1}} (t-t_{k+1/2})\,(v_i(t) - w_i(t)) \, dt = 0\\
\int_{t_k}^{t_{k+1}} v_i(t)\hat{v}_j(t) - w_i(t)\hat{w}_j(t)\,\,dt\, =\, 0,
\end{gathered}
\end{equation}
\end{itemize}
\noindent for all $i,j=1,...,m$, then an error of $O(h^3)$ can be obtained.
Note that it suffices to take $j<i$ in~\eqref{se3}, and that the number of parameters $p$ in each $w_i$ must satisfy $p \geq (m+3)/2$.
Taking polynomials of minimal degree $d$, we obtain $d=\lceil(m+1)/2\rceil$.
\end{theorem}
\begin{proof} If we can find $w_i(t)$ that satisfies above, then it is obvious that the only remaining $O(h^2)$ term~\eqref{thirdGg}
can be integrated by parts once more in order to give a term of $O(h^3)$.
This follows from Theorem \ref{case2}, Corollary~\ref{case3b} and the formulae in Section~\ref{errde}.
To see that we can find the desired functions $w_i(\cdot)$, we consider polynomial approximations $w_i$ of degree $d=p-1$.
We will show that it is possible to solve for the parameters of $w_i$'s.
If $m=1$, see Corollary \ref{case3b}. The system of equations~\eqref{se3} consists of at most $m+m+m(m-1)/2=m(m+3)/2$ independent equations.
To see that third equation in~\eqref{se3} has at most $m(m-1)/2$ independent equations necessary to be zero, notice that when $i=j$ we have
\begin{align*}
\int_{t_k}^{t_{k+1}} v_i(t) \hat{v}_i(t) - w_i(t) \hat{w}_i(t)\, dt&= (1/2) [\hat{v}_i^2(t_{k+1})- \hat{w}_i^2(t_{k+1})],
\end{align*}
\noindent and therefore we can integrate by parts once more to get error of $O(h^3)$. When $j>i$ integration by parts gives
\begin{align*}
\int_{t_k}^{t_{k+1}} v_i(t) \hat{v}_j(t) - w_i(t) \hat{w}_j(t)\, dt &= \bigl[\hat{v}_i(t)\,\hat{v}_j(t) - \hat{w}_i(t)\hat{w}_j(t)\bigr]_{t_k}^{t_{k+1}}\\
&\qquad\qquad - \int_{t_k}^{t_{k+1}} \hat v_i(t) {v}_j(t) -\hat w_i(t) {w}_j(t)\, dt
\end{align*}
and the first term vanishes since $\hat{v}_i(t_{k+1}) = \hat{w}_i(t_{k+1})$.
The number of parameters that each $w_i(\cdot)$ has is $p=d+1$.
Thus, in total, we have $mp$ parameters.
In order to guarantee that we can solve all the equations for the $w_i(\cdot)$'s,
we need that $mp \ge m(m+3)/2$. This implies that $p\ge (m+3)/2$.
Taking polynomials of minimal degree, we see that we require $d=\lceil(m+1)/2\rceil$.
\end{proof}
In what follows, we write $C(n,m)=n!/(m!\,(n-m)!)$, the formula for combinations
(selecting $m$ elements among $n$ elements).
\begin{table}[t!]
\begin{center}
\begin{tabular}{| c | c | c | c |}
\hline
\#Inputs & \#Equations & Degree & \#Parameters\\
$ m $ & $m(m+3)/2$ & $d$ & $m(d+1)$ \\
\hline
1 & 2 & 1 & 2\\
\hline
2 & 5 & 2 & 6\\
\hline
3 & 9 & 2 & 9\\
\hline
4 & 14 & 3 & 16\\
\hline
5 & 20 & 3 & 20\\
\hline
6 & 27 & 4 & 30 \\
\hline
10 & 65 & 6 & 70 \\
\hline
\end{tabular}
\end{center}
\caption{The number of independent equations which need to be solved, the minimal degree of a polynomial $w_i(\cdot)$ required, and the number of available parameters in order to obtain $O(h^3)$ local error for $m$ inputs.}
\label{h3}
\end{table}
In Table \ref{h3}, we present the degree of $w_i(\cdot)$ needed for one to obtain $O(h^3)$ for different number of inputs.
In addition, the number of equations involved and the number of independent parameters in $m$ functions that have to be found are given.
\subsubsection{Higher Order Local Error}
It is possible to generalize the approach used to generate $O(h^3)$ local error.
With additional smoothness requirements on the functions $f(\cdot)$ and $g_i(\cdot)$'s, we can get even higher-order local errors.
In order to simplify the notation, we set $g_0=f$ and $v_0=1$. Then the input-affine system~\eqref{ca}
becomes
\[
\dot x(t) = \sum_{i=0}^{m} g_i(x(t))v_i(t) .
\]
\noindent Let $g_i\in C^r$ for all $i=0,...,m$, and denote by
\[
\dot y(t) = \sum_{i=0}^{m} g_i(y(t))w_i(a_i,t)
\]
\noindent the corresponding approximate system. The local error of $O(h^{r+1})$ can be obtained if $w_i(a_i,t)$ is finitely parametrised, $a_i=(a_{i,0},...,a_{i,d})$ with $d$ being sufficiently large, and satisfying
\begin{subequations}
\begin{align}
\int_{t_k}^{t_{k+1}}v_i(t)\,dt &=\int_{t_k}^{t_{k+1}} w_i(t)\,dt \label{e1} \\
\int_{t_k}^{t_{k+1}} v_j(t) \int_{t_k}^{t} v_i(s)\,ds \ dt &= \int_{t_k}^{t_{k+1}} w_j(t) \int_{t_k}^{t} w_i(s)ds\ dt \label{e2} \\
\int_{t_k}^{t_{k+1}} v_k(t) \int_{t_k}^{t} v_j(s) \int_{t_k}^{s}v_i(r)\,dr\;ds\ dt &= \int_{t_k}^{t_{k+1}} w_k(t) \int_{t_k}^{t} w_j(s) \int_{t_k}^{s}w_i(r)\,dr\;ds\ dt \label{e3}
\end{align}
\begin{multline}
\int_{t_k}^{t_{k+1}} v_{i_r}(s_r) \int_{t_k}^{s_r} v_{i_{r-1}}(s_{r-1})\cdots \int_{t_k}^{s_2} v_{i_1}(s_1)\,ds_1\,\cdots\,ds_{r-1}\,ds_r = \\ \qquad\qquad \int_{t_k}^{t_{k+1}} w_{i_r}(s_r) \int_{t_k}^{s_r} w_{i_{r-1}}(s_{r-1})\cdots \int_{t_k}^{s_2} w_{i_1}(s_1)\,ds_1\,\cdots\,ds_{r-1}\,ds_r
\end{multline}
\end{subequations}
\noindent We can restrict to $i\geq1$ in~\eqref{e1}.
In~\eqref{e2} we can restrict to $i\geq j+1$ as explained in previous subsection. %
In~\eqref{e3}, we can simplify to
\begin{equation*}
\int_{t_k}^{t_{k+1}} v_k(t) \hat{v}_j(t) \hat{v}_i(t)\,dt = \int_{t_k}^{t_{k+1}} w_k(t) \hat{w}_j(t) \hat{w}_i(t)\,dt; \qquad i,j,k\ge 0,\ j\le i
\end{equation*}
Note that for the first two equalities above we need $m + C(m+1,2)$ equations, where $C(n,m)=n!/m!(n-m)!$, which in total gives $(m/2)(m^2+4m+7)$.
For the third one, we need additional $m + 3\,C(m+2,3)$.
In general, it is not easy to see the formula for the number of equations.
The number of parameters and the required degree for $O(h^4)$ are given by
$(m/2)(m^2+4m+7)$ and $N=\lceil(1/2)(m^2 + 4m + 5)\rceil$ respectively.
\section{Improvements and Generalizations}\label{GDI}
In this section, we consider techniques for improving the estimates obtained, and for generalizing the methods to differential inclusions with constraints.
\subsection{Improved approximate solution sets}
The previous error estimates were based on bounding the parameters appearing in the form of the input $w(t)$.
For example, supposing a single input $v(t)\in[-1,+1]$ and taking $w(t)=a_0 + a_1 (t-t_{k+1/2})/h_k$ satisfying $\int_{t_k}^{t_{k+1}} v(t)-w(t)\, dt = \int_{t_k}^{t_{k+1}} t\,\bigl(v(t)-w(t)\bigr)\, dt = 0$, we find $|a_0|\leq 1$ and $|a_1|\leq 3$.
However, if $a_0=\pm1$, then $v(t)\equiv \pm1$ on $[t_k,t_{k+1}]$, so $a_1=0$. Similarly, if $|a_1|=3$ then $a_0=0$.
For a given $a_0$, we can maximise $a_1$ by taking
\[ v(t) = \begin{cases} -1 \text{ for } t_k \leq t \leq t_k+\alpha h_k, \\
+1 \text{ for } t_k+\alpha h_k \leq t \leq t_k+h_k = t_{k+1}. \end{cases} \]
where $\alpha = (1-a_0)/2$. For this $v$, we find
\[ \begin{aligned}
a_1 &= \frac{12}{h_k^2} \int_{t_k}^{t_{k+1}} (t-t_{k+1/2}) \, v(t)\,dt
\ = \ \frac{12}{h_k^2} \biggl( \int_{\alpha h_k}^{h_k} (t-h_k/2)\,dt - \int_{0}^{\alpha h_k} (t-h_k/2)\,dt \biggr) \\
&= 3\bigl(1-(1-2\alpha)^2\bigr) \ =\ 3(1-a_0^2)
\end{aligned} \]
yielding the constraint
\[ a_0^2 + |a_1| / 3 \leq 1 . \]
We can therefore set
\begin{equation}\label{eqn:reducedparameterdomain}
w_k(t) = a_0 + { 3(1-a_0^2)b_1 }\,(t-t_{k+1/2})/h_k \quad \text{with} \quad a_0,b_1\in[-1,+1] .
\end{equation}
This will yield sharper estimates than~\eqref{eq:polynomialparameterformulae}.
\subsection{Differential inclusions with constraints}
Up to now, we have considered affine differential inclusions of the form
\[ \dot{x}(t) = f(x(t)) + \sum_{i=1}^{m} g_i(x(t)) v_i(t) \text{ with } v_i\in[-V_i,+V_i] . \]
In other words, the disturbances $(v_1,\ldots,v_m)$ lie in a coordinate-aligned box $[-V_1,+V_1]\times\cdots\times[-V_m,+V_m]$.
In many problems, the set $V$ containing $(v_1,\ldots,v_m)$ will not be box, but some more complicated set.
We could use our method directly to compute over-approximations to the solution set by taking an over-approximating bounding box $\widehat{V}$ to $V$, but this will typically yield extra solutions, even in the limit of small step size.
Instead, we seek to restrict solutions to those of the original system.
The right-hand-side of the differential inclusion is convex if, and only if, $V$ is a convex set, so it suffices to restrict to this case.
We can write
\[ V = \{ (v_1,\ldots,v_m) \mid v_i \in [-V_i,+V_i] \wedge c(v_1,\ldots,v_m)\leq 0 \} \]
where $c:\mathbb{R}^m\rightarrow\mathbb{R}$ is a convex function.
(More generally, we could consider the disjunction of several such constraints.)
The constraint $c$ yields restrictions on the form of the $w_i$.
For second-order estimates using
\[ w_{k,i}(t) = a_{k,i} = \frac{1}{h_k} \int_{t_k}^{t_{k+1}} v_i(t)\,dt \]
we simply need to introduce the constraints
\begin{equation} \label{eqn:constraints} c(a_{k,1},\ldots,a_{k,m}) \leq 0 \end{equation}
at every step.
For higher-order estimates, the relationship between the parameters and the constraint function may be more complicated; in particular, it need not be the case that $c(w_{k,1}(t),\ldots,w_{k,m}(t))\leq0$ holds.
\subsection{Pseudo-affine inputs}
In this section, we consider differential inclusions of the form
\begin{equation}\label{ial}
\dot x(t) = g(x(t)) + G(x(t))q(v(t)),\,\,\,x(0)=x_0,\,\,\, v(t)\in V
\end{equation}
\noindent where $V$ is a compact, convex subset of $\mathbb{R}^m$, and $g:\mathbb{R}^n\rightarrow \mathbb{R}^n$,
$G:\mathbb{R}^n \rightarrow \mathbb{R}^{n \times p}$, and $q:\mathbb{R}^m \rightarrow \mathbb{R}^p$.
The inclusion above can be viewed in two different ways.
One way is to consider
the right-hand side as a function which is non-linear in the input. For example,
consider a one-dimensional polynomial system with inputs,
\[
\dot x(t) = x^7\, v_1^2 + x\, v_2^2 + x^3\,v_1\,v_2 + x^5,\,\,\, (v_1,v_2)\in V \subset \mathbb{R}^2.
\]
\noindent This has a form $g(x) + G(x)q(v)$ by taking
$g(x)=x^5$, $G(x)=(x^7,\, x,\,x^3)$, and $q(v) = (q_1(v),q_2(v),q_3(v)) = (v_1^2,\,v_2^2,\, v_1\,v_2)$.
The other way is to
consider the right-hand side as a function which is linear in the input
\[
\dot x(t) \in g(x(t)) + G(x(t))r(t),\,\,\,r(t)\in q(V)
\]
\noindent This corresponds to the case where, in general, $V$, i.e., $q(V)$ above, is convex, but not necessarily a box as it was assumed in the
previous section. For example, we can consider $V$ given by constraints such as
$V=\{v(t)\,|\,c(x(t),v(t))\le 0\}$ or $V=\{v(t)\,|\,e(x(t),v(t))=0\}$ for some
continuous functions $c(\cdot)$ and $e(\cdot)$.
In order to compute reachable sets of the system~\eqref{ial}, we proceed as in the previous section.
First we construct an ``approximate'' system
\[
\dot y(t) = g(y(t)) + G(y(t))w(t),
\]
\noindent and then derive an error bound for the approximation.
The local error will be essentially obtained in the same way as before,
i.e., Theorems \ref{case1}-\ref{case3b}, but with certain additional assumptions.
To see what the assumptions should be, suppose that we want to get an error as in Theorem \ref{case2b}.
Then $w(t)=(w_1(t),...,w_m(t))$ is affine and satisfies
the integral equalities
\begin{gather*}
\int_{t_k}^{t_{k+1}} q(v(t)) - w(t) \, dt = 0 \\
\int_{t_k}^{t_{k+1}} t\,(q(v(t))-w(t)) \, dt=0.
\end{gather*}
\noindent As before, we get
\begin{gather*}
a =(a_1,...,a_m)= \frac{1}{h} \int_{t_k}^{t_{k+1}} q(v(t)) dt\\
b =(b_1,...,b_m)= \frac{12}{h^3} \int_{t_k}^{t_{k+1}} q(v(t))(t - t_{k+1/2}) dt
\end{gather*}
\noindent Obviously, we can take box over-approximations for $a$ and $b$, and obtain
over-approximations of the reachable sets. However, if $q$ is nonlinear, or
$V$ is not a box, but some general convex set, then box over-approximations
for $a$ and $b$ could result in large over-approximation of the reachable sets.
Therefore, if the set $q(V)$ satisfies additional assumptions, we can get optimal results
for the parameters $a$ and $b$. For example, if
$q(V)$ is a convex set, centered around the origin, we get $a\in q(V)$
and $b\in (3/h)q(V)$, which gives optimal bounds for the coefficients $a$ and $b$.
\section{Numerical Results}\label{num}
We now illustrate the use of our algorithm by computing reachable sets for some simple systems.
\medskip
\subsection{Van der Pol Oscillator}\label{VDP}
We consider a perturbed Van der Pol oscillator given by
\begin{align*}
& \dot x= y\\
& \dot y= -x + 2\, (1-x^2)\,y + v,
\end{align*}
\noindent where $v$ represents additive noise.
We use the method described in Section~\ref{sec:twoparametererror} and the error bound~\eqref{ine} for additive inputs.
If we take $D=[0,2]\times[-1,3]$ to be the region of computation, then
we get $K=20$, $L=31$, $\Lambda=27$, and $H=12$. In addition, if we assume that
$v(\cdot)\in [-0.08,0.08]$, i.e., $A=0.08$, we obtain
\[
\epsilon=\|x(t_{k+1})-y(t_{k+1})\|\le 11.24\,h^3 + 168.17\, h^3\,\frac{e^{27\,h}-1}{27h}
\]
We use the algorithm described in Section~\ref{Algo} to compute
the solution set
for the set of initial points $X_0=[0.1,0.105]\times[1.5,1.505]$ over the time interval
$[0,1.5]$. Because the bounds $K,\,L,\,\Lambda$, and $H$ are rather large,
we use a fairly small step size, $h=0.001$, yielding an analytical single-step error
of $\epsilon = 1.817092608\times 10^{-7}$.
In Figures~\ref{pvdp1} and \ref{pvdp2} we show the solution set of the perturbed
Van der Pol oscillator using the above values.
In Figure~\ref{pvdp1}, splitting of the domain was performed at $t_1=0.6$ and $t_2=1.2$.
At $t_1$ the set was divided in half along the $x$-axis, and at $t_2$
the set was divided in half along the $y$-axis.
The computed reachable set after $T=1.5$ is a union of the following four sets:
\begin{align*} R(X_0,T) \ \subset \ &[1.46104,1.66704]\times [-0.482307,-0.272922] \\ &\qquad \;\cup\; [1.60834,1.80823]\times[-0.438931,-0.263936] \\ &\qquad\qquad \;\cup\; [1.50247,1.70832]\times[-0.466819,-0.269152] \\ &\qquad\qquad\qquad \;\cup\; [1.65202,1.8518]\times[-0.424135,-0.259941] . \end{align*}
Moreover, if no splitting is performed, the reachable set at $T=1.5$ is
\[ R(X_0,T)\subset [1.43018,1.88571]\times[-0.513789,-0.197579] , \]
and the computed solution set is presented in Figure~\ref{pvdp2}.
From the results obtained, it turns out that the reachable set was smaller when splitting was performed.
Note that the set $D$ in this case was chosen approximately, so that for initial condition $X_0$ and time of computation $T=1.5$,
the solution set of the differential inclusion stays inside $D$. This is done so that analytical error does not have to be recomputed
at each time step. In general, it is not necessary to know {\it a-priori} the region of computation.
In fact, at each time step, we can check whether the reachable set is inside $D$, if not, we can choose new $D$ and recompute the error accordingly.
\bigskip
\begin{figure}[h]
\centering
\begin{minipage}[t]{0.45\linewidth}
\includegraphics[width=2.5in]{vanDerPol1.png}
\caption{Evolution of the Perturbed Van der Pol Oscillator: splitting performed at $t_1=0.6$ and $t_2=1.2$.}
\label{pvdp1}
\end{minipage}%
\hspace{0.5in}%
\begin{minipage}[t]{0.45\linewidth}
\includegraphics[width=2.5in]{vanDerPol2.png}
\caption{Evolution of the Perturbed Van der Pol Oscillator: no splitting performed.}
\label{pvdp2}
\end{minipage}
\end{figure}
Figures~\ref{pvdp1} and~\ref{pvdp2} show that our method is effective in practice for computing rigorous over-approximations
of the solution sets of nonlinear differential inclusions. To support this claim, we compare the results
of computation of the algorithm presented here with
the ones given in \cite{KZ}.
\subsection{Perturbed Harmonic Oscillator}\label{PHO}
The equations for the perturbed harmonic oscillator are given by
\begin{align*}
& \dot x= y + v_1\\
& \dot y= -x + v_2,
\end{align*}
\noindent where $v_i$'s represent bounded noise. Suppose that the range of $v_1$ and $v_2$
is $[-A_1,A_1]$ and $[-A_2,A_2]$ respectively. Notice that noise is additive, and
therefore we can use formula \eqref{ine} to compute the (analytical) error.
In terms of our general set up we have $f(x,y)=(y,-x)$, $g_i=1$, for $i=1,2$.
Hence, we get $\Lambda=1$, $L=1$, $H=0$, and $K'=A_1+A_2$. The one step time error is then given by the
following formula
\[
\epsilon=\frac{7\,h^3\,}{4(2-h)}\,\frac{e^h-1}{h}\,\max\{A_1, A_2\}.
\]
For comparison purposes, Table \ref{table:pho1} is equivalent to a table given in \cite{KZ}.
The total time of computation is $T=2\pi$, $A_1=0$, and initial condition is the box $(1,0)+[-\delta,\delta]^2$.
Note that the diameter of the set $[a_1,a_2]\times [b_1,b_2]\subset \mathbb{R}^2$
is $\max \{a_2-a_1, b_2-b_1\}$, and the radius of the set is half the diameter.
\noindent From Table \ref{table:pho1}, one can see that in most cases our results are better than those obtained
in \cite{KZ}.
In case~\ref{case:largestep}, the time step is $h=2\pi/9=0.698131$, for which the analytical error $\epsilon=0.066170$ is too large to hope for sharp results.
In case~\ref{case:smallstep}, handling the large number of time steps requires more sophisticated techniques for simplifying the representation of the intermediate sets than are currently used in our code, and this is the major contribution to the error.
\newcounter{testcase}
\begin{table}[ht]
\caption{Perturbed Harmonic Oscillator $T=2\pi$}
\centering
\vspace{0.1cm}
\begin{tabular}{c | c | c | c | c | c }
\hline\hline
case & $A_2$ & $\delta$ & num. of steps & Our Diameter & Diameter in \cite{KZ}\\ [0.3ex]
\hline
\refstepcounter{testcase}\arabic{testcase}\label{case:largestep} & 0.1 & 0.01 & 9 & {\bf 3.91258} & 1.178825 \\
\refstepcounter{testcase}\arabic{testcase} & 0.1 & 0.01 & 100 & {\bf 0.8382630} & 0.8453958 \\
\refstepcounter{testcase}\arabic{testcase}\label{case:smallstep} & 0.1 & 0.01 & 1000 & {\bf 65.4376} & 0.8225159 \\
\hline
\refstepcounter{testcase}\arabic{testcase} & 0.1 & 0 & 100 & {\bf 0.8186080} & 0.8253958 \\
\refstepcounter{testcase}\arabic{testcase} & 0.1 & 0.01 & 100 & {\bf 0.8382630} & 0.8453958 \\
\refstepcounter{testcase}\arabic{testcase} & 0.1 & 0.1 & 100 & {\bf 1.018708} & 1.025396 \\
\hline
\refstepcounter{testcase}\arabic{testcase} & 0.01 & 0.01 & 100 & {\bf 0.1018380} & 0.1025396 \\
\refstepcounter{testcase}\arabic{testcase} & 0.1 & 0.01 & 100 & {\bf 0.8382630} & 0.8453958 \\
\refstepcounter{testcase}\arabic{testcase} & 1 & 0.01 & 100 & {\bf 8.205280} & 8.273958 \\
\hline
\end{tabular}
\label{table:pho1}
\end{table}
When both $A_1$ and $A_2$ are nonzero, i.e. $A_1=A_2=0.1$, our results and results from
\cite{KZ} are given in Table \ref{table:pho2}.
Here, we present results only for smaller time steps, even though in \cite{KZ} the results were given for time steps up to $h=0.799$.
We give both second-order and third-order local error estimates.
We can see from Table \ref{table:pho2} that for $h=0.25$ we are starting to get significantly worse results than in \cite{KZ}, but for smaller time steps the results are comparable.
Here, the total time of computation is $T=h$ (one time step), and $\delta=0$.
\begin{table}[htb]
\caption{Perturbed Harmonic Oscillator $T=h$}
\centering
\vspace{0.1cm}
\begin{tabular}{c | c | c | c | c }
\hline\hline
case & h & Our Radius(2) & Our Radius(3) & Radius in \cite{KZ}\\ [0.3ex]
\hline
1 & 0.25 & {\bf 0.0420586} & {\bf 0.0313667} & 0.0284025 \\
2 & 0.1 & {\bf 0.0125864} & {\bf 0.0108419} & 0.0105171 \\
3 & 0.01 & {\bf 0.00102509} & {\bf 0.00100759} & 0.00100502 \\
4 & 0.001 & {\bf 0.00010026} & {\bf 0.00010009} & 0.00010005 \\
\hline
\end{tabular}
\label{table:pho2}
\end{table}
We see that the radius of the enclosure is dominated by the growth due to the noise in the differential inclusion.
The reason why our third-order error estimates give worse enclosures than those of~\cite{KZ} is unclear; however we note that the error estimates obtained there were computed exactly by hand, and our automated methods are better than those of~\cite{KZ} based on the logarithmic norm. Moreover, in~\cite{KZ} they use the 2-norm for the logarithmic norm which gives better results for this example.
\subsection{R\"ossler Equations}\label{RE}
The R\"ossler equations are given by
\begin{align*}
\dot x&=-(y+z) + v_1\\
\dot y&=x+0.2y + v_2\\
\dot z&=0.2+z(x-a) + v_3
\end{align*}
\noindent We aim to estimate the image of the initial set
\[ X_0= \{0\} \times [-10.3\times 10^{-4},+10.3\times10^{-4}] \times [-0.03\times 10^{-4},+0.03\times10^{-4}] \]
under the return map $P$ to the Poincar\'e section $\Sigma=\{x=0, \,\dot{x}>0\}$
for the parameter value $a=5.7$ and noise $v_i \in [-10^{-4},10^{-4}]$ for $i=1,2,3$.
Rather than compute the crossing time for each trajectory, we computed a time interval $T$ containing the first crossing time by comparing the sign of $x$ over the sets $R_k$, and used the estimate $\{0\}\times P(X_0) \subset R(X_0,T)$.
With time step $h=0.005$, total time $T=11.1$, and region of computation $D=([-25,25],[-25,25],[-25,35])$,
we obtain an analytical error of $e=8.586\cdot 10^{-8}$ and
\[
R(X_0,T)=([-0.15572,0.15391],[-3.75926,-3.41772],[0.03139,0.03398]).
\]
\noindent In \cite{KZ}, $R(X_0,T)=([-0.211150,0.20888],[-3.69781,-3.47352],[0.03117,0.03327])$. (They did not specify the time step or the total time it took to compute the value of the Poincar\'e map $R(X_0,T)$.)
In this case neither of the sets is better than the other, but they are comparable, and
hence we show that our algorithm can also provide good estimates when computing
over rather difficult regions, see \cite{KZ}.
\section{Concluding Remarks}\label{Disc}
In this paper, we have given a numerical method for computing rigorous over-approximations of the reachable sets
of differential inclusions. The method gives high-order error bounds for single-step approximations, which is an improvement
of the first-order methods previously available. By providing improved control of local errors, the method allows for accurate computation of reachable sets over longer time intervals.
We give several theorems for obtaining local errors of different orders.
It is easy to see that higher order errors (improved accuracy) require approximations
that have larger number of parameters (reduced efficiency). The growth of the number of parameters is an issue, in general. Sophisticated methods for handling this are at least as important as the single-step method.
The question remains as to which approximate solution (Theorems \ref{case1}-\ref{case3b}) yields the best trade-off between local accuracy and efficiency for computing reachable sets.
The answer is not straightforward and most likely depends on the system itself.
In future work, we plan to investigate the dependence of the efficiency of the algorithm on the number of parameters for various examples.
We have only considered differential inclusions in the form of input-affine systems, and give a brief sketch of how these methods can be applied to other classes of system. We also plan to provide a more detailed exposition of the method in these cases. Moreover, the local error that we obtain is a uniform bound for the error in all components. It should be possible to give slightly better componentwise bounds.
\section{ACKNOWLEDGEMENTS}
This research was partially supported by the European Commission through the project ``Control for Coordination of Distributed Systems'' (C4C) as part of the EU.ICT program (challenge ICT-2007.3.7).
|
{
"timestamp": "2012-06-29T02:01:38",
"yymm": "1206",
"arxiv_id": "1206.6563",
"language": "en",
"url": "https://arxiv.org/abs/1206.6563"
}
|
\section{Nonlocal potential approximation}
\label{sec:NLPA}
We begin with an approximation scheme for the ordered state, where
we explicitly incorporate a finite order parameter into the invariant
effective action.
\subsection{NLPA for the ordered state}
The starting point of the NLPA approach
is an effective average action which consists of both a
nonlocal potential term,
which is restricted to second order in the invariant densities and is characterized by
the coupling function $u_\Lambda(k)$, and a local potential term
$U_\Lambda(\rho-\rho_\Lambda^0)$
which may be an arbitrary function of $\rho-\rho_\Lambda^0$, where
$\rho_\Lambda^0$ is the
(cutoff dependent)
order parameter density and $\rho=\bm{\varphi}^2/2$, where ${\bm \varphi}$ is
a field with $N$-components. To avoid double counting of
correlations, we define $u_\Lambda(k)$ to be completely nonlocal with $u_\Lambda(0)=0$.
We furthermore keep the full momentum dependence $\sigma_\Lambda(k)$ of the
quadratic term in the action,
and thus approximate the effective average action, after subtraction of the
noninteracting
contribution $(1/2)\int_k G_{0,\Lambda}^{-1}(k) \bm{\varphi}_{\bm k} \cdot
{\bm \varphi}_{- {\bm k}}$, as
\begin{align}
\Gamma_\Lambda[\varphi]&= \frac{1}{2} \int_k \big[ \sigma_\Lambda(k)
\bm{\varphi}_{\bm k} \cdot
{\bm \varphi}_{-{\bm k}} +
u_\Lambda(k)\Delta\rho_{\bm k} \Delta\rho_{-{\bm k}} \big] \nonumber \\
& \qquad + \int_x
U_\Lambda(\rho-\rho_\Lambda^0) \, ,
\label{eq:action}
\end{align}
where $\Delta\rho_{\bm k}$ is the Fourier transform of $\rho({\bm x})-\rho_\Lambda^0$.
We use the notation $\int_k =\int d^D k/(2 \pi)^D$ and $\int_x = \int d^D x$
for integrals over momenta and integrals over coordinate space, respectively.
$U_\Lambda(\tau)$ can, for finite cutoff $\Lambda$, be expanded in $\tau$,
\begin{equation}
U_\Lambda(\tau)=\sum_n \frac{1}{n!} U_\Lambda^{(n)} \tau^n \, ,
\end{equation}
with $\tau=\rho-\rho_\Lambda^0$ or, for the symmetric scheme discussed in
subsection \ref{sec:nlpasym}, $\tau=\rho$.
Note that the effective action (\ref{eq:action})
does not contain all terms of a complete derivative approximation to order $q^2$, in which
the derivative term of the action would also include an expansion to all powers of
$\rho-\rho_\Lambda^0$. In the present scheme, one could easily improve
upon the action (\ref{eq:action}) by including e.g. also additional terms
which can
be parametrized by only one momentum. One obvious extension would
be to include a term
$\int_{x,y} [\rho({\bm x})-\rho_{\Lambda}^0]^2 [\rho({\bm y})-\rho_\Lambda^0]
\kappa({\bm x}-{\bm y})$,
with some function $\kappa({\bm x})$
which would be a generalization of
the $(\partial_\mu\rho)^2 (\rho-\rho_\Lambda^0)$
term encountered in a derivative expansion. Such an extension is both
straightforward and numerically feasible.
At the same time, the present approach goes well beyond the derivative
expansion in that it includes
the full momentum dependence in the first two terms of Eq.~(\ref{eq:action}).
As in the derivative expansion, the effective average action obeys
the full $O(N)$ invariance throughout the entire flow.
To determine the flow of $U_\Lambda$, we can use the standard
technique \cite{Wetterich93}
and evaluate the flow of $\Gamma_\Lambda[\bar{{\bm \varphi}}]$
for a homogeneous ($x$-independent) field $\bar{\bm \varphi}$ such that
$V^{-1}\Gamma_\Lambda[ \bar{\bm \varphi}]=U_\Lambda(\bar{\rho} -\rho_\Lambda^0)$
with $\bar{\rho}=\bar{\bm \varphi}^2/2$ and where $V$ is the volume.
We shall now assume that $N\geq 2$, so that there is at least one
gapless transverse mode. The flow of the local potential is then
given by \cite{Wetterich93}
\begin{align}
\partial_\Lambda U_\Lambda(\bar{\rho}-\rho_\Lambda^0)
&=\frac{1}{2}\int_k \partial_\Lambda R_\Lambda(k)\big\{
\bar{G}_{\Lambda,\parallel}(k,\bar{\rho}) \nonumber \\ &\qquad +
(N-1)\bar{G}_{\Lambda,\perp}(k,\bar{\rho}) \big\} \, ,
\label{eq:flowU}
\end{align}
where
\begin{subequations}
\begin{align}
\bar{G}_{\Lambda,\perp}^{-1}(k,\bar{\rho})&=
\sigma_\Lambda(k)+
U^\prime_\Lambda(\bar{\rho}-\rho_\Lambda^0)+G_{0,\Lambda}^{-1}(k) \, ,
\label{eq:Gperprho}
\\
\bar{G}_{\Lambda,\parallel}^{-1}(k,\bar{\rho})&= \sigma_\Lambda(k)+
2\bar{\rho} [u_\Lambda(k)+U^{\prime\prime}_\Lambda(\bar{\rho}-\rho_\Lambda^0)]
\nonumber \\ & \qquad
+
U^\prime_\Lambda(\bar{\rho}-\rho_\Lambda^0)+G_{0,\Lambda}^{-1}(k) \, .
\label{eq:Gparallelrho}
\end{align}
\end{subequations}
Here, the cutoff regulated noninteracting Green's function is
\begin{equation}
G_{0,\Lambda}^{-1}(k)=k^2 + R_\Lambda(k) \, ,
\end{equation}
and $R_\Lambda(k)$ is a regulator for small momenta with $k\lesssim \Lambda$.
The only difference of Eq.~(\ref{eq:flowU})
to the standard form used in a derivative expansion
of $\Gamma_\Lambda$ is the presence of the full functions $u_\Lambda(k)$
and $\sigma_\Lambda(k)$
in Eqs.~(\ref{eq:Gperprho}) and (\ref{eq:Gparallelrho})
rather than just their leading terms of a
$k$-expansion.
To determine the flows of $\sigma_\Lambda(k)$ and
$u_\Lambda(k)$ we invoke a field expansion of
$\Gamma_\Lambda[\varphi]$ in terms of
$\Delta\varphi_k^a=\varphi_k^a-\varphi_\Lambda^0 \delta_{a1} \delta_{k,0}$
with $\rho_\Lambda^0=(\varphi_\Lambda^0)^2/2$.
Here we have, without
loss of generality, assumed an order parameter field $\varphi_\Lambda^0$
which is directed in the $a=1$ direction of the internal space.
To determine the flows of $\sigma_\Lambda$ and $u_\Lambda$, we need
the lowest order irreducible vertices (up to four-point), which have the
form
\begin{subequations}
\begin{align}
\Gamma_{\Lambda,ab}^{(2)}({\bm k},-{\bm k})&=\delta_{ab}\sigma_\Lambda (k) +
2 \delta_{a1}\delta_{b1} \rho_\Lambda^0
\tilde{u}_\Lambda(k) \, ,
\\
\Gamma_{\Lambda,abc}^{(3)}({\bm k}_1,{\bm k}_2,{\bm k}_3) &= \varphi_\Lambda^0
\Big[\delta_{a1} \delta_{bc} \tilde{u}_\Lambda(k_1)
+\delta_{b1}\delta_{ac} \tilde{u}_\Lambda(k_2)\nonumber \\
& \quad
+\delta_{c1}\delta_{ab} \tilde{u}_\Lambda(k_3) \Big]
+ (\varphi_\Lambda^0)^{3}U_\Lambda^{(3)}
\delta_{a1}\delta_{b1}\delta_{c1} \, ,
\\
\Gamma_{\Lambda,abcd}^{(4)}({\bm k}_1\dots {\bm k}_4)&=\delta_{ab}\delta_{cd}
\tilde{u}_\Lambda(k_{12})+\delta_{ac}\delta_{bd}
\tilde{u}_\Lambda(k_{13}) \nonumber \\
& \quad +
\delta_{ad}\delta_{bc} \tilde{u}_\Lambda(k_{14})
+ 2\rho_\Lambda^0 U_\Lambda^{(3)}
\Big[\delta_{ab}\delta_{cd} \nonumber \\
& \quad
\times (\delta_{a1}+\delta_{c1}) +
\delta_{ac}\delta_{bd}(\delta_{a1}+\delta_{b1})
\nonumber \\
& \quad + \delta_{ad}\delta_{bc}(\delta_{a1}+\delta_{b1}) \Big]
\nonumber \\
& \quad
+4(\rho_\Lambda^0)^2 U_\Lambda^{(4)}
\delta_{a1}\delta_{b1}\delta_{c1}\delta_{d1} \, ,
\end{align}
\end{subequations}
where we defined $\tilde{u}_\Lambda(k)=u_\Lambda(k)+U_\Lambda^{(2)}$ and
$k_{ij}=|{\bm k}_i+{\bm k}_j|$.
The flow of the order parameter follows from the requirement
that $\partial_\Lambda\Gamma_\Lambda^{(1)}=0$. This yields \cite{Schuetz06}
\begin{align}
\partial_\Lambda \rho_\Lambda^0 &=\frac{-1}{2 \tilde{u}_\Lambda(0)}
\int_q\Big\{ \big[
\tilde{u}_\Lambda(0)+2\tilde{u}_\Lambda(q)+2\rho_\Lambda^0 U_\Lambda^{(3)} \big]
\dot{G}_{\Lambda,\parallel}(q) \nonumber
\\ & \qquad\qquad\qquad
+(N-1) \tilde{u}_\Lambda(0) \dot{G}_{\Lambda,\perp}(q) \Big\} \, ,
\label{eq:flowrho}
\end{align}
where $G_{\Lambda,\alpha}(k)=\bar{G}_{\Lambda,\alpha}(k,\rho_\Lambda^0)$ for
$\alpha=\perp,\parallel$ and
$\dot{G}_{\Lambda,\alpha}(k)=- G_{\Lambda,\alpha}^2(k) \partial_\Lambda R_\Lambda(k)$.
The flow of $\sigma_\Lambda(k)$ follows from the flow of $\Gamma_{\Lambda,\perp}^{(2)}(k)
=\Gamma_{\Lambda,aa}^{(2)}(k,-k)$
where $a\neq 1$ is a direction transverse to the order parameter field.
\begin{align}
\partial_\Lambda \sigma_\Lambda(k) &=
\int_q \Big\{ \dot{G}_\perp(q) \tilde{u}_\Lambda(q^\prime)-
\dot{G}_\parallel(q)\tilde{u}_\Lambda(q)\Big \} \nonumber \\
& \quad -2 \rho_\Lambda^0 \int_q \Big\{\dot{G}_\parallel(q^\prime)
G_\perp(q) \tilde{u}_\Lambda^2(q^\prime) \nonumber \\
& \qquad \qquad +
\dot{G}_\perp(q^\prime) G_\parallel(q)\tilde{u}_\Lambda^2(q) \Big\} \, ,
\label{eq:flowsigma}
\end{align}
and we defined $q^\prime=|\bm{k}+\bm{q}|$.
The flow equation of $\tilde{u}_\Lambda(k)$ can be obtained from the flow of
$\Gamma_{\Lambda,\parallel}^{(2)}(k)=\Gamma_{\Lambda, 11}^{(2)}(k,-k)$
which reads
\begin{align}
\partial_\Lambda \Gamma_{\Lambda,\parallel}^{(2)}(k)&=
\frac{1}{2}\int_q \Big\{(N-1)\dot{G}_{\Lambda,\perp}(q)\big[\tilde{u}_\Lambda(0)
+2 \rho_\Lambda^0 U_\Lambda^{(3)}\big]
\nonumber \\
& \qquad \qquad
+ \dot{G}_{\Lambda,\parallel}(q)\big[\tilde{u}_\Lambda(0)+2\tilde{u}_\Lambda(q^\prime)
\nonumber \\
& \qquad \qquad \qquad
+12 \rho_\Lambda^0 U_\Lambda^{(3)}
+4 (\rho_\Lambda^0)^2 U_\Lambda^{(4)}\big] \Big\} \nonumber \\
&
-2\rho_\Lambda^0\int_q\Big\{ (N-1)\dot{G}_{\Lambda,\perp}(q^\prime)G_{\Lambda,\perp}(q)\tilde{u}_\Lambda^2(k)
\nonumber \\
& \qquad \qquad
+\dot{G}_{\Lambda,\parallel}(q^\prime)G_{\Lambda,\parallel}(q) \big[\tilde{u}_\Lambda(q) +\tilde{u}_\Lambda(q^\prime)
\nonumber \\
& \qquad \qquad \qquad
+\tilde{u}_\Lambda(k)+2 \rho_\Lambda^0 U_\Lambda^{(3)} \big]^2
\Big\}
\nonumber \\
&
+\big[ \tilde{u}_\Lambda(0)+2 \tilde{u}_\Lambda(k) +2 \rho_\Lambda^0 U_\Lambda^{(3)}\big] \partial_\Lambda \rho_\Lambda^0 \, .
\label{eq:flowparallel}
\end{align}
\begin{table*}[ht]
\caption{Values for the anomalous dimension $\eta$ for various
$N$ and $D=3$
from different
approaches. The columns correspond to the symmetric NLPA, the NLPA
for the ordered phase, results from the background field scheme (BMW)
\cite{Benitez09}, the first order derivative expansion (DE), field theory (FT),
variational perturbation theory (VPT) \cite{Kleinert99} and Monte Carlo (MC).
\label{Tab:eta}}
\begin{ruledtabular}
\begin{tabular}{l l l l l l l l }
N & sym. NLPA & ord. NLPA & BMW & DE & FT & VPT & MC \\
\\
0 & 0.042 & & 0.034 & 0.039 \cite{Gersdorff01}& 0.0272(3)
\cite{Pogorelov08} & 0.031(1) & 0.0303(3) \cite{Grassberger97}
\\
1& 0.042 & & 0.039 & 0.0443 \cite{Canet03}
& 0.0318(3) \cite{Pogorelov08} &0.034(7)& 0.03627(1) \cite{Hasenbusch10}
\\
2& 0.041(5) & 0.049 & 0.041 & 0.049 \cite{Gersdorff01}
& 0.0334(2) \cite{Pogorelov08} &0.035(6)& 0.0381(2) \cite{Campostrini06}
\\
3& 0.040 & 0.046 & 0.040 & 0.049 \cite{Gersdorff01}
& 0.0333(3) \cite{Pogorelov08}&0.035(0)& 0.0375(5) \cite{Campostrini02} \\
4& 0.038 & 0.042 & 0.038 & 0.047 \cite{Gersdorff01}& 0.0350(45)
\cite{Guida98} &0.031& 0.0365(10) \cite{Hasenbusch01}
\\
10& 0.026 & 0.024(5) & 0.022 & 0.028 \cite{Gersdorff01} &0.024 \cite{Antonenko95}& 0.0216 & \\
\end{tabular}
\end{ruledtabular}
\end{table*}
Combining Eqs.~(\ref{eq:flowrho}), (\ref{eq:flowsigma}),
(\ref{eq:flowparallel}), and keeping in mind that
$\Gamma_{\Lambda,\parallel}^{(2)}(k)=\sigma_\Lambda(k)+2\rho_\Lambda^0
\tilde{u}_\Lambda(k)$, one finds
\begin{align}
\partial_\Lambda \tilde{u}_\Lambda(k)& =\frac{1}{2\rho_\Lambda^0}
\int_q \big[\dot{G}_{\Lambda,\parallel}(q)-\dot{G}_{\Lambda,\perp}(q) \big]
\tilde{u}_\Lambda(q^\prime)
\nonumber \\ &
+ \int_q \dot{G}_{\Lambda,\parallel}(q) \Big\{2 U_\Lambda^{(3)}+\rho_\Lambda^0 U_\Lambda^{(4)}
\nonumber \\ & \qquad
-U_\Lambda^{(3)} \big[\tilde{u}_\Lambda(q)+ \rho_\Lambda^0
U_\Lambda^{(3)}\big]/\tilde{u}_\Lambda(0) \Big\}
\nonumber \\ &
-\int_q\Big\{ (N-1)\dot{G}_{\Lambda,\perp}(q^\prime)G_{\Lambda,\perp}(q)\tilde{u}_\Lambda^2(k)
\nonumber \\
& \qquad \qquad
+\dot{G}_{\Lambda,\parallel}(q^\prime)G_{\Lambda,\parallel}(q) \big[\tilde{u}_\Lambda(q) +\tilde{u}_\Lambda(q^\prime)
\nonumber \\
& \qquad \qquad\qquad \qquad
+\tilde{u}_\Lambda(k)+2 \rho_\Lambda^0 U_\Lambda^{(3)} \big]^2
\Big\}
\nonumber \\
&
+\int_q \Big\{\dot{G}_\parallel(q^\prime)
G_\perp(q) \tilde{u}_\Lambda^2(q^\prime) \nonumber \\
& \qquad \qquad +
\dot{G}_\perp(q^\prime) G_\parallel(q)\tilde{u}_\Lambda^2(q) \Big\} \, .
\label{eq:flowu}
\end{align}
This completes the derivation of the flow equations, which are uniquely determined by the
effective action (\ref{eq:action}). The flow Eqs.~(\ref{eq:flowU},\ref{eq:flowrho},\ref{eq:flowsigma},\ref{eq:flowu})
form a closed set which can be used to calculate the full momentum dependence of the self-energies
in a controlled manner and the only approximation is the form of the effective
action as stated in Eq.~(\ref{eq:action}).
By construction, the approach reproduces exactly the correct structure
of the leading order perturbation theory, which is dominant
at large momenta. Also by construction,
it reproduces
the leading terms in a derivative expansion of both $u_\Lambda(k)$ and
$\sigma_\Lambda(k)$ to lowest order in the fields,
which dominate the behavior in the infrared. The same is true also for
the symmetric scheme which we discuss below.
\begin{figure}[ht]
\includegraphics[angle=-90,width=7cm]{fig.eps}
\caption{Dependence of the anomalous dimension $\eta$ on
the order $n$ of the polynomial approximation of the local potential $U_\Lambda(y)=
\sum_{j=0}^n U_\Lambda^{(j)} y^j /j!$ for the symmetry broken phase
(upper curve) and the symmetric
scheme (lower curve) in which the fixed
point is approached from within the symmetric phase. Values shown are for
$N=2$ and $D=3$.
} \label{fig:etavsp}
\end{figure}
\subsection{
NLPA for the symmetric state}
\label{sec:nlpasym}
We now derive flow equations which are valid for the symmetric
phase, which are even simpler. In the NLPA
for the symmetric state
the distance to the critical point is controlled by a
mass term $r_\Lambda$ in the propagator which vanishes at criticality
in the limit $\Lambda\to 0$. We write
the Ansatz for $\Gamma_\Lambda$ in the NLPA as
\begin{align}
\Gamma_\Lambda[\varphi]&= \frac{1}{2} \int_k
\Big\{ [\sigma_\Lambda(k)+r_\Lambda] \bm{\varphi}_{\bm k} \cdot
{\bm \varphi}_{-{\bm k}} +
u_\Lambda(k) \rho_{\bm k} \rho_{-{\bm k}} \Big\} \nonumber \\
& \qquad + \int_x
U_\Lambda(\rho) \, ,
\label{eq:actionsym}
\end{align}
where $\rho_k$ is the Fourier transform of $\rho({\bm x})={\bm \varphi}^2({\bm
x})/2$ and where we put $U_\Lambda^{(1)}=0$ to avoid double counting of the
mass
term which is already accounted for by $r_\Lambda$.
The action (\ref{eq:actionsym}) yields again unique
flow equations for $r_\Lambda$ and the functions $U_\Lambda(\rho)$,
$\sigma_\Lambda(k)$ and $u_\Lambda(k)$
which can be easily derived.
We define the vertices now as expansion coefficients of
$\Gamma_\Lambda$ around ${\bm \varphi}=0$.
The flow for the two-point vertex
$\Gamma_{\Lambda,ab}^{(2)}(k,-k)=\delta_{ab} \Sigma_\Lambda(k)$
is then
\begin{equation}
\partial_\Lambda \Sigma_\Lambda(k)=\frac{1}{2} \int_q \dot{G}_\Lambda(q)
\big[ 2 \tilde{u}_\Lambda(q^\prime)+N \tilde{u}_\Lambda(0) \big] \, ,
\end{equation}
where $\Sigma_\Lambda(k) = r_\Lambda + \sigma_\Lambda(k)$ with
$\sigma_\Lambda(0)=0$ and where
$\dot{G}_\Lambda(q)=-G_\Lambda^2(q)\partial_\Lambda R_\Lambda(q)$
with $G_\Lambda^{-1}=G_{0,\Lambda}^{-1}+\Sigma_\Lambda(k)$.
The flow of the two-point vertex is in the symmetric phase
not sufficient to also extract the flow of $\tilde{u}_\Lambda(k)=u_\Lambda(k)+U_\Lambda^{(2)}$ and
we must extract its flow from the four-point vertex. This yields
\begin{align}
\partial_\Lambda \tilde{u}_\Lambda(k)&=\frac{4+N}{2} \int_q
\dot{G}_\Lambda(q) U_\Lambda^{(3)} -\int_q \dot{G}_\Lambda(q) G_\Lambda(q^\prime)
\Big\{
\nonumber \\ & \quad
(N-1) \tilde{u}_\Lambda(k)^2+\big[\tilde{u}_\Lambda(k)+
\tilde{u}_\Lambda(q^\prime)+\tilde{u}_\Lambda(q)\big]^2 \Big\}
\nonumber \\ & \quad
-\int_q \dot{G}_\Lambda(q) G_\Lambda(q) \Big\{
\nonumber \\ & \qquad
[\tilde{u}_\Lambda(q^\prime)-\tilde{u}_\Lambda(q)]
[\tilde{u}_\Lambda(0)+2\tilde{u}_\Lambda(q)]\Big\} \, ,
\end{align}
with $q^\prime=|{\bm k}+{\bm q}|$.
The flow of the local potential $U_\Lambda(\rho)$ is given by
Eq.~(\ref{eq:flowU})
with
$\rho_\Lambda^0=0$ and with $\sigma_\Lambda(k)$
replaced by $\sigma_\Lambda(k)+r_\Lambda$
in Eqs.~(\ref{eq:Gperprho}) and (\ref{eq:Gparallelrho}).
\section{Results}
\label{sec:results}
We have solved the flow equations both in the symmetric phase and the symmetry
broken phase numerically for $D=3$ and different values of $N$. For $D=3$ the
field expansion of the local potential actually converges
relatively fast \cite{Canet03} so that one can work with a finite
order approximation of the local
potential.
We have used an expansion of $U_\Lambda(\rho)$ up to eighth
order in $\rho$ in both the symmetric and the symmetry broken scheme
and have checked that the values of the anomalous dimension $\eta$ are already converged
at this level of truncation. The convergence can clearly be seen in
Fig.~\ref{fig:etavsp} where we show the values of $\eta$ for
different maximal powers of $\rho$. All results presented below were
calculated with all terms up to order $\rho^8$.
\begin{table*}[ht]
\caption{
Values for the critical exponent $\nu$ for various
$N$ and $D=3$
from different
approaches. The columns correspond to the symmetric NLPA, the NLPA
for the ordered state, results from the background field scheme (BMW)
\cite{Benitez09}, the first order derivative expansion (DE), field theory
(FT),
variational perturbation theory (VPT) \cite{Kleinert99} and Monte Carlo (MC).
\label{Tab:nu}}
\begin{ruledtabular}
\begin{tabular}{l l l l l l l l}
N & sym. NLPA &ord. NLPA & BMW & DE & FT &VPT& MC \\
\\
0& 0.58 & &0.589 & 0.590 \cite{Gersdorff01} & 0.5886(3) \cite{Pogorelov08} & 0.5883 & 0.5872(5) \cite{Pelissetto07} \\
1& 0.62 & &0.632 & 0.6307 \cite{Canet03} & 0.6306(5)
\cite{Pogorelov08}& 0.6305 & 0.63002(10) \cite{Hasenbusch10} \\
2& 0.66 &0.68 &0.674 & 0.666 \cite{Gersdorff01}&
0.6700(6) \cite{Pogorelov08} & 0.6710 & 0.6717(1) \cite{Campostrini06} \\
3& 0.70 &0.72 &0.715 & 0.704 \cite{Gersdorff01} & 0.7060(7) \cite{Pogorelov08}& 0.7075 & 0.7112(5) \cite{Campostrini02}\\
4 & 0.74 &0.76 &0.754 & 0.739 \cite{Gersdorff01}& 0.741(6)
\cite{Guida98}& 0.737 & 0.749(2) \cite{Hasenbusch01} \\
10& 0.89 & 0.89& 0.889 & 0.859 \cite{Gersdorff01}& 0.859 \cite{Antonenko95} & 0.866 \\
\end{tabular}
\end{ruledtabular}
\end{table*}
For numerical stability we choose an exponential cutoff,
\begin{equation}
R_\Lambda(q^2)=Z^{-1}_\Lambda \alpha \frac{q^2}{\exp(q^2/\Lambda^2)-1} \, ,
\end{equation}
where $Z_\Lambda^{-1}=1+\partial_{k^2} \sigma_\Lambda(k)|_{k=0}$ is the wavefunction
renormalization.
Usually the prefactor $\alpha$
is tuned in such a way as to extremize the critical exponents,
e.g. the anomalous dimension
\begin{equation} \label{eq:eta}
\eta=\Lambda\partial_\Lambda \ln Z_\Lambda \, .
\end{equation} This ensures a minimal sensitivity
of the results to small variations in $\alpha$ \cite{Canet03}.
In the present scheme we do not observe an extremal value of $\eta$
as a function of $\alpha$. Instead, we observe a steady decrease
of $\eta$ when $\alpha$ is increased and a minimum which is only
reached asymptotically for large $\alpha$. For the symmetric scheme,
the dependence of $\eta$ on $\alpha$ is already essentially flat
for $\alpha\geq 5$ and we choose $\alpha=5$ for our analysis below.
Similarly, in the symmetry broken phase
only a small decrease of $\eta$ is detected on increasing
$\alpha$ from 1 to 2 and $\eta$ is then essentially unchanged up to $\alpha =
3$. We fixed $\alpha=2$ for the analysis below.
\subsection{Critical exponents $\eta$ and $\nu$}
\label{sec:resultsa}
The value of $\eta$ can be easily determined from the flow of the
quantity $\sigma_\Lambda(k)$ and its low momentum structure via Eq.~(\ref{eq:eta}).
To determine the thermal exponent $\nu$, we use in the symmetric
phase the value of
the fully renormalized mass term $r_*=\lim_{\Lambda \to 0} r_\Lambda$ which
scales as $r_*\simeq (r_{\Lambda_0} - r_c)^{2 \nu}$ where $r_c$ is the critical
value of the mass term at the initial cutoff scale $\Lambda_0$.
Similarly, in the symmetry broken phase we analyse
the scaling of the order parameter $\rho_*=\lim_{\Lambda \to 0} \rho_\Lambda$
which scales as $\rho_*\simeq (\rho_{\Lambda_0}-\rho_c)^{2 \beta}$ where $\beta$
is the critical exponent of the order parameter and $\rho_c$ the critical
value of $\rho_\Lambda$ at the initial scale $\Lambda_0$.
From $\beta$ and $\eta$ we can extract
$\nu$ via the hyperscaling relation $\nu=2\beta/(D-2+\eta)$.
In Tables \ref{Tab:eta} and \ref{Tab:nu} we show our results
for the critical exponents $\eta$ and $\nu$ and compare them with
results from various other approaches.
Somewhat
surprisingly, and in contrast to what is observed in a standard derivative expansion, the
results for the critical exponents are generally better in the scheme where
one approaches
the critical point from the symmetric side, where a field expansion
around $\rho=0$ rather than around a finite value $\rho^0$ is employed.
As can be seen from Table \ref{Tab:eta}, the values for $\eta$ in the
symmetric scheme are, except for $N=0$, quite close to those
obtained within the BMW scheme of Ref.~\cite{Benitez09}.
For large $N$, it is known that $\eta$ behaves as $\eta=0.27/N$ \cite{Moshe03}, and
the result from the symmetric scheme for $N=10$ is already close
to this value.
The results for the approach from the symmetry broken
phase are similar to those of the leading order derivative expansion
(where terms up to order ${\cal O}(q^2)$ are kept),
see Table \ref{Tab:eta}. A possible reason for the inferior accuracy of
the scheme for the symmetry broken
phase compared with the accuracy of the symmetric scheme
is that all nonlocal correlations are determined already from the
two-point function whereas in the symmetric scheme the
nonlocal potential flow is determined from the four-point function.
Including further terms in the effective action is expected to
improve also the results of the symmetry broken NLPA.
The results for the thermal exponent $\nu$ are similar in both schemes
and generally close to the most accurate MC results with deviations
never more than about 3\%. Our values are also close
to values from other approaches.
\subsection{Beyond the universal regime}
\label{sec:resultsb}
Both the schemes for the symmetric and
the symmetry broken phase reproduce the logarithmic behavior
of the self-energy at large momenta
$\Sigma(k)\simeq u_{\Lambda_0}^2 \ln (k/u_{\Lambda_0})$
which can
be derived from perturbation theory \cite{Baym01}. To assess the accuracy
of the calculated self-energy over the whole momentum regime a useful
quantity is the small $u_{\Lambda_0}$ limit of
the one-dimensional integral ($\zeta(z)$ is the Riemann zeta function)
\begin{equation}
c=\frac{128}{3 \pi u_{\Lambda_0}} \zeta(3/2)^{-4/3} \int_0^\infty dq
\frac{\Sigma(q)}{q^2+\Sigma(q)} \, ,
\label{Eq:defc}
\end{equation}
where $\Sigma(q)$ is the full self-energy at criticality,
$\Gamma_{\Lambda,ab}^{(2)}(k,-k)=\delta_{ab} \Sigma_\Lambda (k)$ and
$\Sigma(k)=\lim_{\Lambda \to 0} \Sigma_\Lambda(k)$.
The quantity $c$ is finite in the limit $u_{\Lambda_0}/\Lambda_0 \to 0$
and has physical significance for $N=2$ where it relates to
the suppression of the critical temperature of the weakly interacting
Bose gas in $D=3$ dimensions \cite{Baym01}. The integral in Eq.~(\ref{Eq:defc}) is
dominated by contributions from the crossover regime $k\simeq u_{\Lambda_0}$
where the momentum dependence of the self-energy changes from the
perturbative $\ln (k)$ behavior at large momenta to the anomalous $k^{2-\eta}$
scaling at small momenta. The value of $c$ has been estimated
for different $N$ from Monte Carlo simulations
\cite{Arnold01,Kashurnikov01,Sun03} and has been used
to quantify the accuracy of various approaches
\cite{Ledowski04,Kastening04,Benitez09}. To calculate $c$ we
used a small initial value of $u_{\Lambda_0}$,
$u_{\Lambda_0}/\Lambda_0=0.001$.
Again we find
generally better values for the symmetric scheme. For $N=2$, the value is
about $15 \%$ too high when compared to MC results and
for $N=1$ the difference is slightly larger. For $N= 4$ the
difference is less than 5\%. In comparison with the BMW scheme the
differences are 8\% for $N=2$ and rapidly decrease for larger $N$,
see Table \ref{Table:c}.
\begin{table*}[ht]
\caption{Values for the quantity $c$ defined in Eq.~(\ref{Eq:defc}), from
both the symmetric NLPA and the NLPA for the ordered state
as well as from a perturbative FRG approach (PFRG), the background field scheme (BMW),
variational perturbation theory (VPT) and Monte Carlo (MC).}
\label{Table:c}
\begin{ruledtabular}
\begin{tabular}{l l l l l l l}
N & sym. NLPA & ord. NLPA & PFRG \cite{Ledowski04} & BMW \cite{Benitez09} & VPT \cite{Kastening04} & MC \\
\\
1& 1.38 & & &1.15 & 1.07(10)& 1.09(9) \cite{Sun03} \\
2& 1.49 & 1.60 & 1.23 &1.37 & 1.27(10) & 1.29(5) \cite{Kashurnikov01} \\
& & & & & & 1.32(2) \cite{Arnold01} \\
3& 1.59 & 1.72 & &1.50 & 1.43(11) & \\
4& 1.68 & 1.82 && 1.63 & 1.54(11) & 1.60(10) \cite{Sun03} \\
10& 2.02 & 2.11 && 2.02 & & \\
\end{tabular}
\end{ruledtabular}
\end{table*}
\section{Conclusions}
\label{sec:conclusions}
We have presented a straightforward nonlocal potential approximation
which allows access to finite momentum properties of correlation
functions and also allows for an accurate calculation of critical
exponents. In the NLPA
all truncations are done at the level of the effective
action, a property it
shares with the derivative expansion. This allows for a strict control of the
symmetries of the underlying model and also allows for
extensions of the approach. While the present approach includes
terms of arbitrary powers in the fields (in the local term), the nonlocal
terms are restricted to fourth order in the fields.
In contrast, in
the BMW scheme \cite{Benitez09} all vertices have a momentum dependence
which is however only approximately taken into account.
The present scheme can easily be
extended by including for example terms of the
type $\rho({\bm x}) \rho({\bm y})^2 \kappa({\bm x}-{\bm y})$
which
would result in momentum dependent vertices with up to six legs
and
similar terms of higher order in the densities can of course also easily be
constructed. In $D=3$ it might suffice to limit such terms only to a small maximal
power in $\rho$ to
get converged values
for the critical exponents.
For each such additional term a new coupling function must be
introduced so the nonlocality in the scheme will always be restricted to a
finite order in the fields.
The computational cost of such an extension is
relatively modest, since one would still deal with the flow of a small
number
of one-parameter functions. In contrast, in the BMW scheme
the flow must be analysed for a two-point function which is defined on a two-dimensional
grid, one dimension each for the dependence on fields and momenta, which
is numerically more difficult.
Even in the simplest NLPA truncation analysed here, the results for both
the critical exponents and for the momentum dependence of the
two-point function are already surprisingly good and the present scheme offers
a direct access to both universal and non-universal quantities.
The present scheme is also certainly useful for more complex models
where
even the local terms are restricted to a finite
order in the fields \cite{Braghin10,Sinner09,Sinner10,Dupuis09,Kownacki09,Eichler09}.
We thank
Pawel Jakubczyk,
Andreas Eberlein, and Federico Benitez for discussions.
This work was supported by the DFG research group FOR 723.
|
{
"timestamp": "2012-06-28T02:00:40",
"yymm": "1206",
"arxiv_id": "1206.6121",
"language": "en",
"url": "https://arxiv.org/abs/1206.6121"
}
|
\section{Introduction}
The propagation of the liquid-fluid interfaces through porous media
is central to a wide range of natural phenomena and industrial
applications, with the latter including enhanced oil recovery,
hydrogeology, fuel cells, and carbon dioxide sequestration, to mention
but a few. This topic remains the subject of intensive research,
both experimental and theoretical, comprehensively reviewed in a
number of articles over the past forty years
\cite{Wooding-review:1976,Payatakes-review:1982,Brenner-review88,Olbricht-review-1996,Alava-review04}.
The main aspects of research have been the rate of propagation of
the wetting front
\cite{Labajos-Broncano-1999,Schoelkopf:2000,Gombia:2008,Reyssat:2009},
the wetting front's roughening \cite{Horvath:95,Soriano05} and
stability \cite{Tullis:2007}, as well as the related problems of the
formation and dynamics of the pockets of the displaced phase
(bubbles, ganglia) left behind the front
\cite{Payatakes-review:1982,Bernadiner-1998,Suekane:2010}. The first
of these aspects is of particular importance as it ultimately
determines the main macroscopic characteristics of the process in
many applications.
As was discovered experimentally more than a decade ago by
Delker and his co-workers \cite{Delker-etal-1996}, besides the
common situation where a wetting front propagates through a porous
medium broadly in accordance with Washburn's model
\cite{Washburn-1921}, which balances the driving force due to the
(presumed constant) capillary pressure of a meniscus and viscous
resistance as in the Poiseuille-type flow, for some media, such as
porous matrices made of packed spherical beads, the initial
Washburn-type imbibition is followed by a completely different and
in many ways `anomalous' regime. A representative set of data taken
from \cite{Delker-etal-1996} is shown in Fig.~\ref{fig:Delker96}.
Similar results have been reported later by Lago and Araujo
\cite{Lago-Araujo-2001}. The essence of the discovered effect is
that, if the height $h$ of the capillary rise (measured from some
initial level to remove from consideration the entrance effects) is
plotted against time $t$ on the $\log$-$\log$ scale
(Fig.~\ref{fig:Delker96}), one can immediately see two distinct
regions. Roughly speaking, for about two minutes the liquid climbs
2/3 of its eventual (maximum) height $h_{max}$ in the Washburn-like
regime after which it takes many hours for it to advance across the
remaining 1/3 of $h_{max}$, with the wetting front moving in
small-amplitude jumps on the pore scale \cite{Lago-Araujo-2001}. The
$\log$-$\log$ plot of this second regime shows a clear
concave-convex sequence which indicates that the dynamics there is
more complex than what one would expect from some power-law fit and
the accompanying arguments. Another intriguing feature of the
phenomenon is that $h_{max}$ is determined by the balance of
capillarity and gravity, i.e.\ the factors that, together with
viscous resistance, determine the dynamics of the Washburn regime,
although what looked like the Washburn regime has been abandoned
after a couple of minutes from the onset of the capillary rise and
for hours the process is distinctly non-Washburnian.
The experimental data has been discussed qualitatively in terms of
interface pinning and random capillary forces
\cite{Delker-etal-1996,Lago-Araujo-2001}, but on the quantitative
level the only outcome is that a simple equation
\begin{equation}
\label{Delker-fit}
dh/dt=v_0(F/F_T-1)^\beta
\end{equation}
expressing the rate of the capillary rise as a function of a
driving force $F$ and a threshold value $F_T$ leads to an
``anomalously large'' exponent $\beta$ \cite{Delker-etal-1996}, so
that $h$ deviates from the experimental data for small times and
unphysically diverges as time goes to infinity. The dashed line in
Fig.~\ref{fig:Delker96} corresponds to
\begin{equation}
\label{actual-fit}
h=H_c - (H_c-h_1)[1+A(t-t_1)]^{1/(1-\beta)}
\end{equation}
that has been deduced from (\ref{Delker-fit}) and used in
\cite{Delker-etal-1996}; the values of the constants $H_c$, $h_1$,
$A$, $t_1$ and $\beta$ are given in \cite{Delker-etal-1996}.
Although the fitting curve (\ref{actual-fit}) is able to describe
only a finite time span, the general ideas of the interface
pinning and random forces that might de-pin the interface and
allow it to move further seem fruitful, and a question that arises
naturally is how to embed them into the regular framework of
continuum mechanics of porous media, as opposed to just using {\it
ad hoc\/} semi-empirical equations for the wetting front evolution
in a one-dimensional flow. Below, we address this question on the
basis of an earlier developed approach to the modelling of the
wetting front dynamics in porous media based on considering
different modes of motion that menisci go through on the pore
scale and the corresponding technique of conjugate problems
\cite{SS-11a}. Then, we will discuss how the experimental
phenomenon in question is seen through other modelling approaches.
\section{Macroscopic (Darcy-scale) description}
In the continuum framework, the wetting front $\partial\Omega_1$
is a moving boundary which, together with other boundaries
$\partial\Omega_2$, confines a domain $\Omega$ where the Darcy
equation,
\begin{equation}
\label{Darcy-1}
\mathbf{u}=-(\kappa/\mu)\nabla (p+\rho gz),
\qquad(\mathbf{r}\in\Omega),
\end{equation}
and the continuity equation, $\nabla\cdot\mathbf{u}=0$, for the
average velocity $\mathbf{u}$ and pressure $p$ operate. Then, the
wetting front evolution is part of the solution of a properly
formulated problem for these bulk equations. The Darcy equation
(\ref{Darcy-1}) is written in the form already accounting for
gravity with $\rho$ being the density of the liquid, $g$ the
gravitational acceleration, $z$ the coordinate directed against
gravity, $\kappa$ the permeability of the porous matrix and $\mu$
the liquid's viscosity; the coordinates are represented in terms
of the position vector $\mathbf{r}$; hereafter the pressure is
measured with respect to the (presumed constant) pressure in the
displaced gas.
\begin{figure}
\centerline{\includegraphics[scale=0.5]{fig1}}
\caption{Time-dependence of the imbibition height in 1D capillary rise.
Circles: experimental data from \cite{Delker-etal-1996}
for beads $253\ \mu$m in diameter;
dashed line: fit considered in \cite{Delker-etal-1996};
solid line: present theory.
}
\label{fig:Delker96}
\end{figure}
An appropriate starting point for the modelling is the recently
developed approach \cite{SS-11a} that gives boundary conditions
for Laplace's equation for $p$,
\begin{equation}
\label{Laplace-p}
\nabla^2p=0,\qquad(\mathbf{r}\in\Omega),
\end{equation}
which follows from the Darcy and continuity equations above and
can be used to replace the latter. The key idea of this approach
is to consider the {\it modes of motion\/} which the menisci that
collectively form the wetting front undergo as the wetting front
propagates. The simplest model formulated in the framework of this
approach accounts for the two main modes: (i) the wetting mode,
where, on the pore scale, the contact line moves forward,
essentially in the Washburn regime but accounting for a {\it
dynamic}, i.e.\ velocity-dependent, contact angle $\theta_d$, and
(ii) the threshold mode, where the contact line gets pinned and
the meniscus bends as the pressure on it increases until the
contact angle reaches some threshold value $\theta_*$ when the
meniscus can go back into the wetting mode with the contact line
moving again. This increase of pressure on the meniscus as the
contact line gets pinned is similar to what one would have on a
piston sucking a liquid into a pipe if the motion of the piston is
blocked. For the porous medium, the maximum possible pressure on
the meniscus in the threshold mode, $\bar{p}|_{\partial\Omega_1}$,
is the solution of a {\it conjugate\/} problem \cite{SS-11a}
\begin{equation}
\label{conjugate}
\nabla^2\bar{p}=0,
\quad
(\mathbf{r}\in\Omega);
\qquad
\mathbf{n}\cdot\nabla(\bar{p}+\rho gz)|_{\partial\Omega_1}=0,
\end{equation}
with the boundary condition for $\bar{p}$ on $\partial\Omega_2$
being the same as for $p$; $\mathbf{n}$ is the outward normal to
$\partial\Omega_1$. Thus, the idea of the interface pinning is
already in the model, used in formulating the boundary conditions
on the wetting front that we will recapitulate below, and in this
work we consider how the model can be generalized to incorporate
the idea of random forces that could lead to de-pinning of the
interface and describe the observed features of the phenomenon
mentioned earlier.
On the moving wetting front the kinematic and dynamic boundary
conditions for (\ref{Laplace-p}) have the form
\begin{equation}
\label{kinematics-general}
\frac{\partial f}{\partial t} + \mathbf{u}\cdot\nabla f=0,
\end{equation}
\begin{equation}
\label{p=}
p=A_1 p_1+A_2 p_2,
\quad (\mathbf{r}\in\partial\Omega_1),
\end{equation}
where for a one-dimensional capillary rise $f(\mathbf{r},t)\equiv
z-h(t)$, $p_1$, $p_2$ are the averaged pressures and $A_1$, $A_2$
are the spatio-temporally averaged fractions of the unit area of
the free surface corresponding to the two modes of motion
($A_1+A_2=1$). For the wetting mode one has
\begin{equation}
\label{p_1=}
p_1=-2\sigma\cos\theta_d/a,
\end{equation}
where $\sigma$ is the liquid-gas surface tension, $a$ is the
effective radius of the capillary and the dependence of the dynamic
contact angle $\theta_d$ on the meniscus speed $u_1$ is given by
\cite{SS-11a,TheBook}
\begin{equation}
\label{theta_d=}
\frac{u_1}{U_{cl}}=
\left(
\frac{
(1+(1-\rho^s_{1e})\cos\theta_s)(\cos\theta_s-\cos\theta_d)^2}
{4(\cos\theta_s+B)(\cos\theta_d+B)}
\right)^{1/2},
\end{equation}
where $B=(1-\rho^s_{1e})^{-1}(1+\rho^s_{1e}u_0(\theta_d))$,
$\theta_s$ is the static contact angle,
$$
u_0(\theta_d)=\frac{\sin\theta_d-\theta_d\cos\theta_d}
{\sin\theta_d\cos\theta_d-\theta_d},
\qquad
U_{cl}
=\left(\frac{\gamma\rho^s_0(1+4\alpha\beta)}{\tau\beta}\right)^{1/2}
$$
is the characteristic speed associated with the parameters that the
`additional' physics of wetting brings in to resolve the well-known
`moving contact-line problem'
\citep{Dussan-review79,TheBook,dry_facts}, $\rho^s_0$,
$\rho^s_{1e}$, $\alpha$, $\beta$, $\gamma$, $\tau$ are material
constants characterizing the contacting media whose values can be
found elsewhere \citep{TheBook,Blake-me02}.
In the threshold mode, the contact line gets pinned and the
meniscus, experiencing an increase in pressure on it, bends so that
the contact angle varies from $\theta_d$, i.e.\ the value with which
the meniscus arrives at the threshold mode, to $\theta_*$, which is
the value at which the meniscus `breaks through' the threshold and
the process goes back to the wetting mode. From the dynamics of this
type of motion, under the assumption that the meniscus retains the
shape of a spherical cap with its radius varying as the meniscus
bends, one has that \cite{SS-11a}
\begin{equation}
\label{u_2=}
u_2=aJ(\theta_d)T^{-1},
\end{equation}
\begin{equation}
\label{p_2=}
p_2=P - \frac{2\sigma}{u_1T}
\left(\frac{Pa}{2\sigma}+\cos\theta_d\right)
J(\theta_d),
\end{equation}
where
\begin{equation}
\label{big_P=}
P = \bar{p}|_{\partial\Omega_1},
\end{equation}
$$
J(\theta_d)=
\left[\frac{1}{2}\tan\left(\frac{\theta}{2}-\frac{\pi}{4}\right)
+\frac{1}{6}\tan^3\left(\frac{\theta}{2}-\frac{\pi}{4}\right)
\right]_{\theta_d}^{\theta_*},
$$
$[f]_a^b\equiv f(b)-f(a)$, and the time $T$ that the meniscus
spends in the threshold mode is given by
\begin{equation}
\label{old_T=}
T=T_2\left(\theta_d,\frac{Pa}{2\sigma}\right)
\equiv a u_1^{-1}\left(\frac{Pa}{2\sigma}
+\cos\theta_d\right)
I\left(\theta_d,\frac{Pa}{2\sigma}\right),
\end{equation}
$$
I\left(\theta_d,\frac{Pa}{2\sigma}\right)
=\int_{\theta_d}^{\theta_*} \frac{d\theta}
{(1+\sin\theta)^2(Pa/(2\sigma)+\cos\theta)}.
$$
For a one-dimensional capillary rise, as follows from
(\ref{conjugate}), the stagnation pressure
$\bar{p}|_{\partial\Omega_1}$ is given simply by
$\bar{p}|_{\partial\Omega_1}=p_0-\rho gh(t)$, where $p_0$ is the
prescribed pressure at $z=0$.
Finally, the velocity of the wetting front as a whole,
$u_n=\mathbf{n}\cdot\mathbf{u}|_{\partial\Omega_1}$ is given by an
expression
\begin{equation}
\label{u_n=}
u_n=A_1u_1+A_2u_2,
\end{equation}
which is similar to (\ref{p=}). For the menisci intermittently going
through the wetting and threshold modes as the wetting front
propagates, the coefficients $A_1$ and $A_2$ can be viewed as
reflecting the fraction of the time spent in each mode as the
meniscus travels over the length of averaging that introduces the
Darcy scale (or, equivalently, the fraction of the interfacial area
corresponding to each mode of motion over the time interval of
averaging that introduces the Darcy time scale), i.e.\ the
spatio-temporal averages we mentioned earlier. They are given by
\cite{SS-11a}
\begin{equation}
\label{alphas}
A_1=\frac{s_1u_2}{s_2u_1+s_1u_2},
\quad A_2=\frac{s_2u_1}{s_2u_1+s_1u_2},
\end{equation}
where
\begin{equation}
\label{s=}
s_1(\theta_d,\theta_*)=
\left\{
\begin{array}{ll}
1, & \theta_d-\theta_*\ge0\\
s_{10}, & \theta_d-\theta_*<0
\end{array}
\right.,
\qquad
s_2=1-s_1,
\end{equation}
and $s_{10}$ $(<1)$ is a characteristic of the porous matrix. Then,
as should be expected, the slowest (controlling) mode of motion
makes a greater contribution to the average pressure and velocity at
the wetting front, and if the velocity $u_i$ corresponding to the
$i$th mode reaches zero, one will have $A_i=1$ and the pressure at
the wetting front, that is now at rest, will become equal to $p_i$.
Thus, the wetting front will stop propagating in two cases: (a)
$u_1=0$ and hence $\theta_d=\theta_s$, which means that the meniscus
has reached its equilibrium state corresponding to the maximum
imbibition height $h_{max}$, and (b) $u_2=0$ so that the wetting
front still has a capacity to propagate further but the contact line
became pinned (threshold mode) and the pressure that mounts on the
meniscus in this case, even when it reaches its maximum possible
value $\bar{p}|_{\partial\Omega_1}$, is insufficient to push the
meniscus through. Mathematically, in the last case we have that
$\bar{p}|_{\partial\Omega_1}$, which goes down as $h$ increases,
becomes equal to $p_*=-2\sigma\cos\theta_*/a$ and hence is unable to
make the contact angle greater than $\theta_*$, which would allow
the meniscus to resume its motion in the wetting mode: in this case
$I(\theta_d,Pa/(2\sigma))$ and hence the time $T$ go to infinity.
The height corresponding to this last case depends only on
$\theta_*$, and the meniscus reaches it in a finite time
\cite{SS-11a}.
\section{Subcritical interface de-pinning}
It is convenient to introduce the `stagnation' contact angle
$\bar{\theta}$ corresponding to the stagnation pressure
$\bar{p}|_{\partial\Omega_1}$ by
$\bar{\theta}=\arccos(-\bar{p}|_{\partial\Omega_1}a/(2\sigma))$.
Then, if $\bar{\theta}>\theta_s$, the wetting front has the
potential to propagate but if at the same time
$\bar{\theta}\le\theta_*$ the stagnation pressure
$\bar{p}|_{\partial\Omega_1}$ is unable to push the meniscus through
the threshold mode. In a sense, $\theta_*-\bar{\theta}$ can be
viewed as a quantitative measure of the potential barrier that has
to be overcome to get the meniscus back into the wetting mode when
$\bar{p}|_{\partial\Omega_1}$ is `subcritical', i.e.\ less than
$p_*$, and hence unable to push the meniscus through the threshold.
Importantly, since, until the wetting front reaches its equilibrium
position at the maximum height, at every moment individual menisci
are not in the same mode of motion (and, for the threshold motion,
not even in the same stage of it), the Darcy-scale pressures we are
considering, including the stagnation pressure
$\bar{p}|_{\partial\Omega_1}$, represent average values, whereas on
the pore scale one also has pressure fluctuations due to mutual
influences of menisci. These fluctuations are unimportant when the
stagnation pressure $\bar{p}|_{\partial\Omega_1}$ is capable of
pushing the meniscus through the threshold mode. However, as
$\bar{p}|_{\partial\Omega_1}$ goes down to $p_*$, the time $T$
needed to overcome the threshold increases, so that, when it becomes
large enough, it is the fluctuations that increasingly become the
mechanism of de-pinning, and when $\bar{\theta}\le\theta_*$ it is
only the random fluctuations that can de-pin the menisci.
The simplest way of accounting for the subcritical de-pinning due to
random fluctuations as this mechanism takes over from the `regular'
de-pinning due to the stagnation pressure is to assume that, once
$T_2$ becomes greater than a certain value $T_+$, it is these random
factors that will de-pin the interface and determine the time it
stays in the threshold mode. For the `regular' stagnation pressure
$\bar{p}|_{\partial\Omega_1}$ one has that $T_2\to\infty$ as
$\bar{p}|_{\partial\Omega_1}\to p_*$ and
$\bar{p}|_{\partial\Omega_1}$ is no longer able to push the meniscus
through when $\bar{p}|_{\partial\Omega_1}<p_*$. The probability of
the random factors de-pinning the interface should be expected to go
down as $p_*-\bar{p}|_{\partial\Omega_1}$ (or equivalently
$\theta_*-\bar{\theta}$) increases. The simplest way of generalizing
the model to incorporate the above scenario mathematically is to
replace (\ref{old_T=}) and (\ref{big_P=}) respectively with
\begin{equation}
\label{new_T=}
T=\left\{
\begin{array}{ll}
\displaystyle
T_2\left(\theta_d,\frac{a\bar{p}|_{\partial\Omega_1}}{2\sigma}\right),
& \hbox{if }
T_2(\bar{p}|_{\partial\Omega_1})\le T_+\\
T_++k(\theta_+-\bar{\theta})^2, & \hbox{if }
T_2(\bar{p}|_{\partial\Omega_1})> T_+
\end{array}
\right.,
\end{equation}
\begin{equation}
\label{new_big_P=}
P=\left\{
\begin{array}{ll}
\bar{p}|_{\partial\Omega_1}, & \hbox{if }
T_2(\bar{p}|_{\partial\Omega_1})\le T_+\\
p_+, & \hbox{if }
T_2(\bar{p}|_{\partial\Omega_1})> T_+
\end{array}
\right.,
\end{equation}
where $p_+$ is determined by $T_2(p_+)=T_+$, and
$\theta_+=\arccos(-p_+a/(2\sigma))$. Since $T_2$ rises steeply
only when $\bar{\theta}$ is close to $\theta_*$, in practice one
has that $\theta_+\approx\theta_*$ and $p_+\approx p_*$.
Now, we have a closed model, which, unlike {\it ad hoc\/} formulae
for one-dimensional propagation of the wetting front, is applicable
for a general case of three-dimensional flows. In order to describe
a particular flow involving a moving wetting front, one has to solve
Laplace's equation (\ref{Laplace-p}) for $p$ in $\Omega$ whose
boundary $\partial\Omega_1$ evolves according to
(\ref{kinematics-general}), where $\mathbf{u}$ is given by
(\ref{Darcy-1}), subject to the dynamic condition (\ref{p=}), where
equations (\ref{p_1=})--(\ref{p_2=}),
(\ref{u_n=})--(\ref{new_big_P=}) close the formulation and
$\bar{p}|_{\partial\Omega_1}$ is the solution of the conjugate
problem (\ref{conjugate}); the boundary conditions on
$\partial\Omega_2$ for both $p$ and $\bar{p}$ are the same and,
together with the initial shape of $\Omega$, they specify a
particular problem.
In the case of a one-dimensional capillary rise Laplace's
equations for $p$ and $\bar{p}$ give that these are linear
functions of $z$, and equations (\ref{Darcy-1}),
(\ref{Laplace-p}), (\ref{kinematics-general}) yield
$$
\frac{dh}{dt}=\frac{\kappa}{\mu}\left(
\frac{p_0-p(h,t)}{h} - \rho g\right),
$$
which together with the algebraic equations (\ref{p=}), where
$p|_{\partial\Omega_1}\equiv p(h,t)$, (\ref{p_1=})--(\ref{p_2=}),
(\ref{u_n=}), where $u_n=dh/dt$,
(\ref{alphas})--(\ref{new_big_P=}), with
$\bar{p}|_{\partial\Omega_1}=p_0-\rho gh$ as the solution of the
conjugate problem, form a closed system for $h$, $p(h,t)$, $p_1$,
$\theta_d$, $u_1$, $u_2$, $p_2$, $A_1$, $A_2$, $s_1$ and $s_2$.
The results of comparing the numerical solution corresponding to
the experimental flow conditions of \cite{Delker-etal-1996} with
the data are shown in Fig.~\ref{fig:Delker96}. As one can see, the
solid curve representing the computed solution describes the data
very well over the whole time period spanning more than four
orders of magnitude. The theoretical curve also levels off as
$t\to\infty$, indicating that the capillary rise does eventually
come to a halt. Comparison of the theory with all four sets of
experimental data from \cite{Delker-etal-1996} is shown in
Fig.~\ref{fig:all_four}. (The dashed line in this figure is used
to indicate that for the beads' diameter of 510~$\mu$m, strictly
speaking, the theory is used outside its limits of applicability
as the whole advancement of the wetting front is less than 40
beads' diameters, so that it is difficult to talk about the
separation of scales required for the continuum mechanics approach
to work. Indeed, for this approach to be applicable, there should
exist an intermediate scale much larger than the pore size and at
the same time much smaller than the macroscopic length scale on
which the flow is described. In the case of 510~$\mu$m beads,
`much larger' and `much smaller' would mean 6 times larger or
smaller, which is clearly not sufficient to ensure acceptable
accuracy.)
It is noteworthy that, although the initial regime where the curve
in Fig.~\ref{fig:Delker96} rises steeply looks Washburn-like, it
actually involves both the wetting and the threshold modes of
motion. As in \cite{SS-11a}, the presence of the
threshold-overcoming motion becomes pronounced only when $h$
climbs close to $h_*=(p_0-p_*)/(\rho g)$, i.e.\ when $\bar{p}$
becomes close to $p_*$ or, in other words, $\bar{\theta}$ close to
$\theta_*$. For the results presented in Fig.~\ref{fig:Delker96}
$\theta_*=67^\circ$, $\theta_s=0^\circ$, $s_{10}=0.7$, $\mu
U_{cl}/(\kappa\rho g)=10^2$, $\rho^s_{1e}=0.6$, $T_+/T_0=3$,
$k/T_0=4\times10^3$, where $T_0=2\sigma\mu/((\rho g)^2a\kappa)$,
and it is $\theta_s$, $\theta_*$, $T_+/T_0$ and $k/T_0$ that are
most important. Since in the experiment, as described in
\cite{Delker-etal-1996}, the bottom of the test section was
located approximately 4~cm above the bottom of the porous sample,
we have to start the calculations from the bottom of the sample
with $p_0=\rho g Z_{ini}$, $Z_{ini}=4$~cm and, to compare theory
with the data, measure the time from the moment the wetting front
reaches $Z_{ini}$. To describe the data in
Fig.~\ref{fig:all_four}, only a variation of $\theta_*$ and
$K=k/T_0$ is required: $\theta_{*180}=64^\circ$,
$\theta_{*253}=67^\circ$, $\theta_{*359}=78^\circ$;
$K_{359}=3K_{253}=12K_{180}$, $K_{180}=10^3$.
\begin{figure}
\centerline{\includegraphics[scale=0.5]{fig2}}
\caption{All four experimental sets from \cite{Delker-etal-1996}
for beads' diameters
$180$~$\mu$m ($\triangledown$),
$253$~$\mu$m ($\circ$),
$359$~$\mu$m ($\vartriangle$), and
$510$~$\mu$m ($\square$).
The corresponding theoretical curves are given as solid lines for
the first 3 sets of data and as a dashed line for the last.
}
\label{fig:all_four}
\end{figure}
It is worth mentioning that, although in the model we represent
capillary effects in the pores using the capillary pressure and
viscous resistance corresponding to an `effective' circular
cross-section, whereas in the experiment with the porous matrix
made of spherical beads neither the `chambers' nor the `throats'
of the porous medium had circular cross-sections, no adjustment of
the results was required: we used the radius of the bead as $a$ in
the model with no subsequent calibration of the time and length
scales. This shows that a `representative' way of modelling the
porous medium, as opposed to the much more difficult way of
incorporating into a model the exact porous structure determined
via elaborate and expensive experiments, allows one to incorporate
all the main features of the process on the pore scale, including
the actual physics of dynamic wetting, and obtain good results for
the flow on the Darcy scale.
\section{Discussion}
It is instructive to look at the obtained theoretical result and the
experiment it addresses from the viewpoint of the different
modelling approaches used to describe two-phase flows in porous
media. These approaches broadly fall into two general classes,
`representative' and `simulative'. They are not antagonistic as, in
theory, if the same pore-scale physics and the same characteristics
of the porous medium are accounted for in the models formulated in
the framework of each of these approaches, then the results produced
by these models are expected to converge and describe the same
macroscopic behaviour of the wetting front.
The present model has been developed in the framework of the
`representative' approach, where the equations and boundary
conditions are formulated on the Darcy scale, i.e.\ the scale
implying that the continuum limit has already been taken, and the
properties of the porous matrix are `represented' in terms of the
permeability coefficient (or tensor, if the porous medium is
anisotropic), effective size of the pores (or the corresponding
distribution), effective threshold angles, etc. Importantly, since
the pore-scale wetting process is modelled realistically, with the
velocity (as well as material) dependence of the dynamic contact
angle, the model captures naturally that the wetting front has the
capacity to move forward when the contact angle is greater than the
static value $\theta_s$, i.e.\ when the interface has not reached
its maximum height determined by the balance of capillary and
gravity forces. In other words, as the meniscus gets de-pinned, the
fact that the contact angle differs from its equilibrium value
$\theta_s$ and that the dynamic contact angle is velocity-dependent
act as a mechanism that moves the interface until the dynamic
contact angle goes down to $\theta_s$ and the interface reaches its
maximum height.
The subcritical de-pinning mechanism that comes into action when
$\bar{p}|_{\partial\Omega_1}\le p_*$ is formulated implicitly, in
terms of the `potential barrier' $\theta_+-\bar{\theta}$ and the
`waiting time' $T_++k(\theta_+-\bar{\theta})^2$ required for the
random fluctuations to overcome it. Both of these characteristics
are Darcy-scale parameters. A natural way to develop the model
further would be to remove the direct link between the `potential
barrier' and the `waiting time' and instead explicitly introduce the
field of pressure fluctuations depending on the flow rate and
properties of the porous matrix. Then, this pressure fluctuation
field becomes an addition to $\bar{p}|_{\partial\Omega_1}$ as a
breakthrough factor. For this explicitly introduced mechanism, the
results obtained in the present work would serve as a guideline,
indicating one of the outcomes that this mechanism should produce.
The implicit mechanism of the subcritical interface de-pinning on
the Darcy scale that we have introduced can be viewed as a
macroscopic manifestation of the dynamics of avalanches
\cite{Dougherty-Carle:1998}. Avalanches qualitatively stem from the
same physics as the one considered here and, in a sense, can be
regarded as a medium-scale phenomenon, i.e.\ between the pore scale
and the Darcy scale. The idea of linking the Darcy-scale subcritical
interface de-pinning and the dynamics of avalanches agrees with the
fact that the avalanche-type events have been observed
experimentally by Lago and Araujo \cite{Lago-Araujo-2001} in the
anomalous regime of the wetting front propagation. The avalanche
behaviour of the wetting fronts is currently being investigated in
terms of scaling laws \citep{Rost_etal:2007}, and the present
theory, with its explicit accounting for different pressures
corresponding to different modes of motion and their spatio-temporal
weights, offers a macroscopic framework for the mathematical
description of these medium-scale events. Then, the subcritical
interface de-pinning in the anomalous regime of the wetting front
propagation could be viewed as the macroscopic outcome of a
succession of avalanches with decreasing probabilities.
The `simulative' approach to the modelling of two-phase flows in
porous media is based on replacing the actual porous matrix with a
regular network of `chambers' and capillary `throats' connecting
them
\cite{Fatt:1956-164,Lenormand88,Aker00,Joekar-Niasar-2010,Markicevic:2011}.
In order to partially compensate for the anisotropy inherent in this
approach where, as is the case in most works, the chambers are
placed at the nodes of a regular lattice, the sizes of both
chambers and throats are made random following certain prescribed
distributions. The macroscopic (Darcy-scale) characteristics of
the process are obtained as a result of the appropriate averaging.
The simulative approach has the appeal of what looks like a
numerical experiment offering a transparent link between the
pore-scale and the Darcy-scale dynamics, but, unlike the case of
molecular-dynamics simulations with regard to macroscopic fluid
mechanics, this appeal is moderated by a number of factors,
notably the fact that the actual dynamics on the pore scale is not
computed. Instead, it is essentially represented in terms of a
Washburn-type dynamics, thus by-passing the `moving contact-line
problem' \cite{Dussan-review79} and the associated physics of
dynamic wetting \cite{dry_facts,Blake-review:2006}. It is also
important to note that the rigid geometric structure of the
network simulating the actual porous matrix imposes unavoidable
limitations on the macroscopic transport properties of the porous
medium that the network purports to simulate. By contrast, the
representative approach can introduce any tensorial and
topological characteristics of the porous medium that the
corresponding model requires. Having in mind the above
shortcomings of the simulative approach, it is interesting to
look at what it can produce with regard to the anomalous regime of
the wetting front propagation.
Bijeljic {\it et al.} \cite{Markicevic:2011} performed the network
modelling of the capillary rise and compared their results to the
truncated set of data taken from Lago and Araujo
\cite{Lago-Araujo-2001}, with the original experiment by Delker et
al.\ \cite{Delker-etal-1996} mentioned but not used for comparison
with the simulations. The simulations agree well with the
experimental data for the times $t\le4\times10^4$~s after which the
simulated height levels off. However, when one takes the full set
of experimental data reported by Lago and Araujo, i.e.\ for the times
up to $t=2\times10^5$~s, one can immediately see that the wetting
front continues to climb. It is also worth noting that the simulated
height-vs-time curve in the $\log$-$\log$ coordinates is convex,
with the slope monotonically decreasing, whereas, as one can see in
Fig.~\ref{fig:Delker96}, the experimental data show a distinct
concave-convex sequence, i.e.\ after an initial decrease as the
anomalous regime is entered the slope picks up again until, finally,
the data start to level off, asymptotically approaching the maximum
height. The same trend was observed earlier by Diggins {\it et al.}
\cite{Diggins-Ralston-1990} whose data are given in Fig.~12 of Lago
and Araujo \cite{Lago-Araujo-2001}. This figure in
\cite{Lago-Araujo-2001} also clearly shows that, although the
time-dependence of the height of the capillary rise following from
the Washburn-type interplay of capillarity, viscosity and gravity
can describe the `regular' regime {\it and\/} be fitted to the
initial stage of the `anomalous' regime, it is nowhere near a
satisfactory description of the latter once the full set of data is
considered, as the height-vs-time curve in the logarithmic
coordinates picks up again and climbs much higher than the above
fitting predicts.
From the viewpoint of the theory developed in the present work, the
main deficiency of the currently implemented network models is that
they essentially deal only with one --- wetting --- mode of motion
of the menisci. Then, setting aside the minor (in comparison with
the effects considered here) variations introduced by randomizing
the size distributions, these models broadly reproduce the
Washburn-type dynamics of the wetting front. As a result, once
gravity starts to balance capillarity as the driving force, the
wetting front slows down and comes to a halt. Essentially, the
fitting of the simulated curve to the experimental data for the very
beginning of the anomalous regime is produced by adjusting the
maximum height of the capillary rise, whereas, as we pointed out in
the introduction, the intriguing feature of the anomalous regime is
precisely the fact that it lies in between the `normal' Washburnian
regime of the imbibition and the maximum height that is also
Washburnian.
The capillary network approach can be modified in a relatively
straightforward way to account for the threshold mode of motion. The
main element in this modification should be equipping the `chambers'
with threshold characteristics, such as the pressure required to
overcome the threshold, which, besides material properties, can
depend on the number of menisci reaching the same chamber. The
implementation of the subcritical de-pinning is more challenging as
this would require accounting for fluctuations of pressure
experienced by the liquid, i.e.\ replacing the Washburn-type models
of the flow in the `throats' by an essentially unsteady motion that
takes into account the unsteady processes in the neighbouring
chambers and throats. An intermediate check for such a model could
be its ability to produce avalanches as the medium-scale phenomena
that on the Darcy scale result in the anomalous regime of
imbibition.
\section{Conclusion}
The developed theory shows that the new approach to the modelling
of the propagation of wetting fronts in porous media based on
considering specific modes of motion that the menisci of the pore
scale undergo as the front propagates allows one to incorporate
critical phenomena and adequately describe experimental data for
the anomalous regime of imbibition. Accounting for the random
pore-scale forces macroscopically, in terms of the `potential
barriers' and the corresponding times required for the random
forces to overcome these barriers, allowed the simplest model
formulated in the framework of the new approach to describe the
whole experimental curve, from the Washburn regime to the (also
Washburn) maximum imbibition height with the anomalous regime in
between. The proposed theory could be used as a guide for the
porous network modelling and the study of the anomalous imbibition
regime as the manifestation of the dynamics of avalanches.
This publication was based on work supported in part by Award No.\
KUK-C1-013-04, made by King Abdullah University of Science and
Technology (KAUST).
\bibliographystyle{abbrvnat}
|
{
"timestamp": "2012-06-28T02:02:51",
"yymm": "1206",
"arxiv_id": "1206.6221",
"language": "en",
"url": "https://arxiv.org/abs/1206.6221"
}
|
\section{Introduction}
Let $\Bbbk$ be an algebraically closed field of characteristic $0$. The
question of classifying all Hopf algebras of a given
dimension over $\Bbbk$ goes back
to Kaplansky in 1975. To date, there are very few general results.
The Kac-Zhu Theorem \cite{Z} states that a Hopf
algebra of prime dimension is isomorphic to a group algebra. S.-H. Ng
\cite{Ng} proved that in dimension $p^{2}$, the only Hopf algebras
are the group algebras and the Taft algebras, using previous results
in \cite{andrussch}, \cite{masuoka-p^n}. It is a common belief that
a Hopf algebra of dimension $pq$, where $p$ and $q$ are distinct
prime numbers, is semisimple. Hence, it should be isomorphic to a
group algebra or a dual group algebra by \cite{EG}, \cite{GW},
\cite{ma-2p}, \cite{pqq2}, \cite{So}.
This conjecture has been verified for some particular
values of $p$ and $q$, see \cite{andrunatale, bitidasca,
etinofgelaki2, Ng2, Ng3, Ng4}.
Hilgemann and S.-H. Ng gave the classification of
Hopf algebras of dimension $2p^{2}$ in \cite{hilgemann-ng} and more
recently Cheng and Ng \cite{ChNg} studied the case $ 4p $, solving
the problem for dimension $20, 28$ and $44$.
In fact, all Hopf algebras of dimension $\leq 23$ are
classified: for dimension $\leq 11$ the problem was solved by
\cite{W}; an alternative proof appears in \cite{stefan}. The
classification for dimension 12 was done by \cite{fukuda} in the
semisimple case and then completed by \cite{natale} in the general
case and for dimension $16$ it was solved by \cite{kashina},
\cite{pointed16}, \cite{biti}, \cite{de1tipo6chevalley} and
\cite{GV}. For dimension 18 the problem was solved by D. Fukuda
\cite{d-fukuda} and recently Cheng and Ng finished the
classification for dimension $20$. For the state of the
classification of low dimensional Hopf algebras as of 2009, see \cite{biti2}.
\smallbreak The classification appears more difficult for even
dimensions as studied in this article. One reason may be
that for $H$
a nonsemisimple Hopf algebra of odd dimension, either $H$ or $H^{*}$ has a
nontrivial grouplike element. The smallest dimension that is still
unclassified is $24$ and, since the classification for
dimension $27$ was recently completed
in \cite{bg}, the next unclassified dimension after $24$ is $32$.
\smallbreak In this paper we study Hopf algebras over
$\Bbbk$ whose dimension is either smaller than $100$ or
can be decomposed into the product of a small number of prime numbers.
In particular, we give some partial results on Hopf algebras of
dimension $8p$, with applications to the case of dimension $24$, and
dimension $rpq$, where $r,\ p,\ q$ are distinct prime numbers.
Although there are many results on the classification problem for
dimension
$4p$ \cite{ChNg}, that classification is still incomplete,
so we cannot hope to complete the classification
for dimension $8p$. However, we can narrow the possibilities.
We will say
that a Hopf algebra $H$ is of {\it type}
$(r,s)$
if $|G(H)|=r$ and $|G(H^{*})|=s$.
\begin{mainthm}\label{thm:8p}
Let $ H $ be a nonsemisimple Hopf algebra of dimension
$ 8p $ with $p$ an odd prime. If $H$ is not of type $ (r,s) $ with
$ r,s $ powers of $ 2 $, $ (2p,2) $ or $ (2p,4) $, then $H$ is
pointed or copointed.
\end{mainthm}
Using counting arguments we can improve the theorem above
in case $ p=3 $.
\begin{mainthm}\label{thm:24}
Let $ H $ be a Hopf algebra of dimension $ 24$ such that the coradical is not a sub-Hopf algebra
of $H$.
Then $H$ is of type
$ (2,2) $, $ (2,4) $ or $ (6,4) $.
\end{mainthm}
\section{Preliminaries}
In this section we introduce notation, recall some previous results
which help with the classification of finite dimensional Hopf
algebras, see \cite{andrunatale}, \cite{natale}, \cite{stefan},
\cite{bitidasca}, \cite{biti}, \cite{GV}, \cite{d-fukuda}, and
introduce a few new ones. For the general theory of Hopf algebras
see \cite{Mo}, \cite{S}.
\subsection{Conventions}\label{subsec:conv}
Throughout this paper $ p,q $ will denote odd prime numbers, $
C_{k} $ the cyclic group of order $ k $ and $\mathbb{D}_{k}$ the
dihedral group of order $2k$. Unless otherwise specified, all Hopf
algebras in this article are finite dimensional over a field
$\Bbbk$ algebraically closed of characteristic zero.
\begin{remark}\label{rm: LR2} By \cite{LR2}, with the assumptions
above,
a Hopf algebra is semisimple if and only if
it is cosemisimple if and only if $S^2$, the square of the antipode,
is the identity. Thus if $L$,$K$ are semisimple sub-Hopf algebras of
a Hopf algebra $H$, then $\langle L,K \rangle$, the sub-Hopf algebra
of $H$ generated by $L$ and $K$ is semisimple since $S_H^2$ is the
identity on $L$ and on $K$.
\end{remark}
For $H$ a Hopf algebra over $\Bbbk$ then $\Delta$, $\e$, $S$
denote respectively the comultiplication, the counit and the
antipode; $\GH$ denotes the group of grouplike elements of $H$; $H_{0}$ denotes the coradical;
$(H_n)_{n \in {\mathbb N}}$ denotes the coradical filtration of $H$ and $L_h$
(resp. $R_h$) is the left (resp. right) multiplication in $H$ by
$h$.
The set of $(h,g)${\it -primitives} (with
$h,g\in\GH$) and set of {\it skew-primitives} of $H$ are:
$$
\begin{array}{rcl}
\cP_{h,g}(H)&:=&\{x\in H\mid\Delta(x)=x\otimes h+g\otimes x\},\\
\noalign{\smallskip} \cP(H)&:=&\sum_{h,g\in\GH}\cP_{h,g}(H).
\end{array}
$$
We say that $x\in \Bbbk(h-g)$ is a {\it trivial} skew-primitive; a
skew-primitive not contained in $\Bbbk G(H)$ is \textit{
nontrivial}.
Let $\coMn$ denote the simple coalgebra of
dimension $n^2$, dual to the matrix algebra ${\mathcal M}(n,\Bbbk)$.
We say that a coalgebra $C$ is a $d\times d$ \textit{matrix-like
coalgebra} if $C$ is spanned by elements $(e_{ij})_{1\leq i,j\leq d}$
such that $\Delta(e_{ij}) =
\sum_{1\leq l \leq d} e_{il} \otimes e_{lj}$ and $\e(e_{ij}) = \delta_{ij}$.
If the set $(e_{ij})_{1\leq i,j\leq d}$ of
a coalgebra $C$ of dimension $d^{2}$ is linearly independent,
following \c Stefan we call $\be = \{e_{ij}:\ 1\leq i,j\leq d\}$
a \textit{multiplicative matrix} and then
$ C\simeq \coMd $ as coalgebras.
Since the only semisimple and pointed Hopf algebras are the group
algebras, we shall adopt the convention that `pointed' means
`pointed nonsemisimple'. If the dual $H^\ast$ of a finite
dimensional nonsemisimple Hopf algebra $H$ is pointed, then
we say that $ H $ is \textit{copointed}.
\smallbreak Recall that a tensor category $\mathcal{C}$ over $\Bbbk$
has the Chevalley property if the tensor product of any two simple
objects is semisimple. We shall say that a Hopf algebra $H$ has
the \emph{Chevalley property} if the category $\mbox{\rm Comod\,} (H)$ of
$H$-comodules does.
\begin{rmk}
(i) The notion of the Chevalley property in the setting of Hopf
algebras was introduced by \cite{AEG}: it is said in \textit{loc.
cit.} that a Hopf algebra has the Chevalley property if the category
$\Rep(H)$ of $H$-modules does.
\smallbreak (ii) Unlike \cite{AEG}, in \cite[Section
1]{de1tipo6chevalley}, the authors refer
to the Chevalley property in the
category of $H$-comodules; this definition is the one we adopt.
Note that it is equivalent to say that the coradical $H_{0}$ of $H$ is a
sub-Hopf algebra.
\smallbreak (iii) If $H$ is semisimple or pointed then it has the
Chevalley property.
\end{rmk}
\par Let $N$ be a positive integer and let
$q$ be a primitive $ N^{th} $ root of unity. We denote by $ T_{q} $
the Taft algebra which is generated as an algebra by the elements $
g $ and $ x $ satisfying the relations $ x^{N} = 0 = 1-g^{N}$, $
gx=q xg $. Taft Hopf algebras are self-dual and pointed of dimension
$ N^{2} $ with $g$ grouplike and
$x$ a $(1,g)$-primitive, i.e., $ \Delta(g) = g\otimes g $ and $
\Delta(x) = x\otimes 1 + g\otimes x $. If $N=2$ so that $q = -1$, then $T_{-1}$
is called the Sweedler Hopf algebra
and will be denoted $H_4$ throughout this article.
\subsection{Spaces of coinvariants}\label{sec: coinvariant}
Let $K$ be a coalgebra with a distinguished grouplike 1. If $M$
is a right $K$-comodule via $\delta$, then the space of {\it right
coinvariants} is $$ M^{\operatorname{co} \delta} = \{x\in
M\mid\delta(x)=x\ot1\}.
$$
Left
coinvariants are defined analogously. If $\pi:H\rightarrow K$ is a morphism of Hopf
algebras, then $H$ is a right $K$-comodule via $(1\otimes\pi)\Delta$.
In this case $H^{\operatorname{co} \pi}:=H^{\operatorname{co} (1\otimes\pi)\Delta}$ and
$H^{co\pi}$ is a subalgebra of $H$.
We make the following observation.
\begin{lema}\label{lm: on R pi=epsilon}
Let $\pi: H \rightarrow K$ be a Hopf algebra map and let $R:= H^{co\pi}$. Then
$\pi|_R = \varepsilon|_R$.
\end{lema}
\begin{proof} Let $z \in H^{co \pi}$. Since $\pi$ is a morphism of Hopf algebras,
\begin{displaymath}
\Delta_{K} \pi(z) = (\pi \otimes \pi) \Delta_H(z) = \pi(z_1) \otimes \pi(z_2) = \pi(z) \otimes 1,
\end{displaymath}
so that, applying $m_K \circ (\varepsilon_K \otimes \id_{K})$ to the equation above,
we obtain $\pi(z) = \varepsilon_K(\pi(z))\in \Bbbk$. Again, since $\pi$ is a Hopf algebra map,
$\varepsilon_K(\pi(z)) = \varepsilon_H(z)$ and so $\pi(z) = \varepsilon_H(z)$.
\end{proof}
\subsection{Extensions of Hopf algebras}\label{subsec:extensions}
Recall \cite{andrudevoto} that an exact sequence of Hopf algebras is a sequence of Hopf algebra
morphisms $A\overset{\imath}\hookrightarrow
H\overset{\pi}\twoheadrightarrow B$ where $A,H,B$ are any Hopf algebras, $\imath$ is injective, $\pi$ is
surjective, $\pi \imath = \varepsilon_A$, $\ker \pi = A^+H$ and $ A = H^{co \pi}$.
An exact sequence is called {\it central}
if $A$ is contained in the centre of $H$.
The next result will be useful throughout. For a proof see \cite[Lemma 2.3]{GV}.
\begin{lema} \label{lm: dim H co dim B = dim H}
If $\pi:H\rightarrow B$ is an epimorphism of Hopf algebras
then $\dim H=\dim H^{\operatorname{co} \pi}\dim B$. Moreover, if
$A=H^{\operatorname{co} \pi}$ is a sub-Hopf algebra of $H$ then the sequence $
A\overset{\imath}\hookrightarrow H\overset{\pi}\twoheadrightarrow B
$ is exact. \qed
\end{lema}
The following proposition tells us how to
compute, in a particular case,
the dimension of the coradical of $ H^{*} $
using exact sequences.
\begin{prop}\label{prop:exact-dim-cor}
Let $\Gamma$ be a finite group and
$A \hookrightarrow H \twoheadrightarrow \Bbbk \Gamma$
an exact sequence of
Hopf algebras.
Then $\dim (H^{*})_{0} = \dim (H/\rad H) =
|\Gamma|\dim (A^{*})_{0}$.
\end{prop}
\begin{proof}
The statement follows from the proof of \cite[Lemma 5.9]{GV}. The
idea is the following: since the sequence is exact, $H$ is the
$\Gamma$-crossed product $A*\Gamma$. Let $g \in \Gamma$, then the
weak action of $g$ on $A$ defines an algebra map and consequently
$\rad A$ is stable by $\Gamma$. Then $\rad A*\Gamma$ is a nilpotent
ideal of $A*\Gamma$ and $\rad A*\Gamma\subseteq \rad H$. Since
$H/(\rad A*\Gamma)$ is semisimple, it follows that $\rad H \subseteq
\rad A*\Gamma$ and hence $\dim (H^{*})_{0} = \dim (H/\rad H) = \dim
( A*\Gamma / \rad A*\Gamma)= |\Gamma|\dim (A/\rad A) =|\Gamma|\dim
(A^{*})_{0} $.
\end{proof}
\subsection{Yetter-Drinfel'd modules} \label{subsect:yd} For $H$
any
Hopf algebra, a left Yetter-Drinfeld module $ M $ over $H$ is a left
$H$-module $(M,\cdot)$ and a left $H$-comodule $(M,\delta)$ such
that for all $h \in H, m \in M$,
$$
\delta(h \cdot m) = h_{{1}} m_{(-1)}\cS(h_{{3}}) \otimes
h_{{2}}\cdot m_{(0)},
$$
where $\delta(m) = m_{(-1)}\otimes m_{(0)}$. We will denote this category by
$ {}^H_H\mathcal{YD} $.
\subsection{On the coradical filtration}\label{subsec:coradical filtration}
We begin by recalling a description of the coradical filtration due
to Nichols. More detail can be found in \cite[Section
1]{andrunatale}.
Let $D$ be a coalgebra over $\Bbbk$.
Then there exists a coalgebra projection
$\pi: D \to D_{0 }$ from
$ D$ to the coradical
$ D_{0}$ with kernel $I$, see
\cite[5.4.2]{Mo}. Define the maps
$$\rho_{L}:= (\pi\otimes \id)\Delta: D \to D_{0}\otimes D \qquad\mbox{ and }\qquad
\rho_{R}:= (\id\otimes \pi)\Delta: D \to D\otimes D_{0},$$
and let $P_{n}$ be the sequence of subspaces defined recursively
by
\begin{align*}
P_{0} & = 0,\\
P_{1} & = \{x\in D:\ \Delta(x) = \rho_L(x) +\rho_R(x)\}
= \Delta^{-1}(D_{0}\otimes I + I\otimes D_{0}),\\
P_{n} & = \{x\in D:\ \Delta(x) - \rho_L(x) - \rho_R(x) \in
\sum_{1\leq i \leq n-1}P_{i}\otimes P_{n-i}\}, \quad n\geq 2.
\end{align*}
Then by a result of Nichols, $P_{n} = D_{n}\cap I$ for $n\geq 0$,
see \cite[Lemma 1.1]{andrunatale}. Suppose that
$D_{0} = \bigoplus_{\tau \in {\mathcal I}} D_{\tau}$, where the $D_{\tau}$ are simple
coalgebras of dimension $d^{2}_{\tau}$.
Any $D_{0}$-bicomodule is a direct sum of simple $D_{0}$-sub-bicomodules
and every simple $D_{0}$-bicomodule has coefficient coalgebras
$D_{\tau}, D_{\gamma}$ and has dimension
$d_{\tau}d_{\gamma} = \sqrt{\dim D_{\tau}\dim D_{\gamma}}$
for some $\tau, \gamma \in {\mathcal I}$, where $d_{\tau},d_{\gamma}$
are the dimensions of the associated comodules of $D_{\tau}$ and
$D_{\gamma}$, respectively.
\smallbreak Now suppose $H$ is a Hopf algebra. Then $H_{n},P_{n}$ are
$H_{0}$-sub-bicomodules of $H$ via $\rho_R$ and $\rho_L$. As in
\cite{andrunatale}, \cite{d-fukuda}, for all $n\geq 1$ we denote by
$P_{n}^{\tau,\gamma}$ the isotypic component of the
$H_{0}$-bicomodule of $P_{n}$ of type the simple bicomodule with
coalgebra of coefficients $D_{\tau}\otimes D_{\gamma}$. If $D_{\tau} =
\Bbbk g$ for $g$ a grouplike, we use the superscript $g$
instead of $\tau$. If the simple subcoalgebras are $S(D_\tau)$, $S(D_\gamma)$,
(respectively $gD_\tau$, $D_\tau g$ for
$g$ grouplike)
we write $P_n^{S\tau, S\gamma}$ (respectively $P_n^{g\tau, g\gamma}$, $P_n^{\tau g, \gamma g}$).
For $D_{\tau}, D_{\gamma}$ simple coalgebras we
denote $P^{\tau,\gamma} = \sum_{n \geq 0}P_{n}^{\tau,\gamma}$.
Similarly, for $\Gamma$ a set of grouplikes of $H$,
let $ P^{\Gamma,\Gamma}$ denote $\sum_{g,h \in \Gamma}P^{g,h}
$
and let $H^{\Gamma, \Gamma}:= P^{\Gamma, \Gamma}
\oplus \Bbbk\Gamma$. If $\mathcal{D},\mathcal{E}$ are
sets of simple subcoalgebras, let $P^{\mathcal{D}, \mathcal{E}}$ denote
$\sum_{D \in \mathcal{D}, E \in \mathcal{E}}P^{D,E}$.
Since $H_n = H_0 \oplus P_n$, we have that $H = H_0 \oplus \sum_{\tau,\gamma}
P^{\tau,\gamma}$.
Following D. Fukuda, we
say that the subspace $P_{n}^{\tau,\gamma}$ is
\textit{nondegenerate} if $P_{n}^{\tau,\gamma} \nsubseteq P_{n-1}$.
The following results are due to D. Fukuda; note that (ii) is a
generalization of \cite[Cor. 1.3]{andrunatale} for $n>1$.
\begin{lema}\label{lema:fukuda-deg} \label{lema:fukuda}
\label{lema:fukuda-deg-m}
\emph{(i)} \cite[Lemma 3.2]{fukuda-pq} If the subspace $P_{n}^{\tau,\gamma}$
is nondegenerate for some $n > 1$, then there exists a set of
simple coalgebras $
\{D_{1},\cdots ,D_{n-1} \}$ with
$P_{i}^{\tau,D_{i}}$, $P_{n-i}^{D_{i},\gamma}$ nondegenerate
for all $1\leq i\leq n-1$.
\par \emph{(ii)} \cite[Lemma 3.5]{fukuda-pq}
For $S$ the antipode in the Hopf algebra $H$ and $g \in \GH$,
$$\dim P_{n}^{\tau, \gamma} = \dim P_{n}^{S\gamma,S\tau}
= \dim P_{n}^{g\tau, g\gamma}= \dim P_{n}^{\tau g, \gamma g}.$$
\par \emph{(iii)} \cite[Lemma 3.8]{fukuda-pq} Let $C,D$ be
simple subcoalgebras such that $P_{m}^{C,D}$ is nondegenerate. If
$\dim C \neq \dim D$ or $\dim P_{m}^{C,D} - \dim P_{m-1}^{C,D} \neq \dim
C$ then there exists a simple subcoalgebra $E$ such that
$P_{\ell}^{C,E}$ is nondegenerate for some $\ell\geq m+1$.\qed
\end{lema}
The following facts about dimensions from \cite{andrunatale} will
be useful later.
\begin{lema}\label{lema:andrunatale}\cite{andrunatale} Let $H$ be a Hopf algebra
with $G := G(H)$. Then for $n \geq 0$, $d \geq 1$, $|G|$ divides $\dim H_n$ and
$ \dim H_{0,d}$, where $H_{0,d}$ denotes the
direct sum of the simple subcoalgebras of $H$ of dimension $d^{2}$. Also
$H_n = H_0 \oplus P_n$ so that $|G|$ divides $\dim P_n$.\qed
\end{lema}
It is well-known (see for example \cite{andrunatale}) that
if a Hopf algebra $H$ has a nontrivial skew primitive element, then
$\dim H$ must be divisible by a square.
More precisely we have the following lemma.
\begin{lema}\label{lema: dim H mn rel pr}
Let $H$ be a Hopf algebra with $|G(H)| = m$
and $\dim H = mn$ where $m,n$ are relatively prime.
Then $H$ has no nontrivial
skew-primitive element.
\end{lema}
\begin{proof}
Suppose that $x$ is a nontrivial skew-primitive
element in $H$ and let $L$ be the
sub-Hopf algebra of $H$ generated by $x$ and $G(H)$.
By \cite[5.5.1]{Mo}, $L$ is
pointed. By \cite[Proposition 1.8]{andrunatale},
$\dim L$ is divisible by $rm$ where $r\neq 1$ is a positive integer
dividing $m$. Then $\dim H = mn$ is divisible by $rm$,
contradicting the fact that $(m,n) = 1$.
\end{proof}
The next proposition generalizes results of Beattie and
D\v{a}sc\v{a}lescu \cite{bitidasca} and gives
a lower bound for the
dimension of a finite dimensional Hopf algebra
without nontrivial skew-primitive elements.
\begin{prop}\label{prop:biti-dasca}
\label{cor:bitidasca-p1}\cite[Proposition 3.2]{bg}
Let $H$ be a non-cosemisimple Hopf
algebra with no nontrivial skew-primitives.
\par \emph{(i)} For any $g \in G(H)$ there exists a simple subcoalgebra $C$ of
$H$ of dimension $> 1$ such that $P_{1}^{C,g}\neq 0$, $P_k^{C,D}$ is
nondegenerate for some $k>1$ and $D$ a simple subcoalgebra of the same dimension as
$C$, and $P_m^{g,h}$ is nondegenerate for some $m>1$ and $h$
grouplike.
\par \emph{(ii)}
Suppose $H_0\simeq \Bbbk G \oplus \sum_{i=1}^t{\mathcal M}^*(n_i,\Bbbk)$ with $t\geq 1$,
$2 \leq n_1 \leq \ldots \leq n_t$. Then
\begin{equation} \label{form: bdf bound}\nonumber
\dim H \geq \dim(H_0) + (2n_1 + 1)|G| + n_1^2.
\end{equation}\qed
\end{prop}
\subsection{Matrix-like coalgebras}
The next theorem due to \c Stefan has been a key component for
several classification results.
\begin{theorem}\label{thm:stefan}\cite[Thm. 1.4]{stefan}
Let $D$ be the simple coalgebra $\mathcal{M}^\ast(2, \Bbbk)$.
\begin{enumerate}
\item[(i)] For $f$ an antiautomorphism of $D$ such
that $\operatorname{ord} ( f^{2}) = n < \infty$ and $n > 1$, there exist a multiplicative
matrix $\be$ in
$D$ and a root of unity $\omega$ of order $n$ such that
$$f(e_{12}) = \omega^{- 1} e_{12},\quad f(e_{21}) = \omega e_{21},\quad
f(e_{11}) = e_{22},\quad f(e_{22}) = e_{11}.$$
\item[(ii)] For $f$ an automorphism of $D$ of finite order $n$,
there exist a multiplicative matrix $\be$ on $D$
and a root of unity $\omega$ of order $n$ such
that $f(e_{ij}) = \omega^{i-j} e_{ij}$.
\qed
\end{enumerate}
\end{theorem}
Now we recall some useful results on matrix-like coalgebras.
In \cite{bitidasca} all $2\times 2$ matrix-like
coalgebras of dimension less than $4$ were described; we summarize in the
following theorem.
\begin{theorem}\cite[Thm. 2.1]{bitidasca}\label{thm:2x2-matrix}
Let $D$ be a $2\times 2$ matrix-like coalgebra of dimension less
than $4$. If $\dim D = 1,2$ then $D$ has a basis of grouplike elements.
If $\dim D = 3$, then $D$ has a basis $\{ g,h,x \}$ where $g,h$ are grouplike and
$x$ is $(g,h)$-primitive.\qed
\end{theorem}
We end this section with the following lemma.
\begin{lema}\label{lema:proj-coalg}
Let $\pi: H \to H_{4}$ be a Hopf algebra epimorphism.
If $H$ is generated by a simple
subcoalgebra $D$
of dimension $4$, then $\dim\ ^{\operatorname{co} \pi}D \geq 2$ or
$\dim D^{\operatorname{co} \pi} \geq 2$.
\end{lema}
\begin{proof}
Since $H_{4}$ is pointed, $\dim \pi(D) < 4$. Moreover, since
$D$ generates $H$ as an algebra, $\pi(D) $ generates $H_4 $.
Then by Theorem \ref{thm:2x2-matrix}, $ \dim \pi(D) =3 $.
Let $\{e_{ij}\}_{1\leq i,j\leq 2}$
be a multiplicative matrix of $D$, then
$\{\pi(e_{ij})\}_{1\leq i,j\leq 2}$ is a linearly dependent set.
\par As in the proof of Theorem \ref{thm:2x2-matrix},
see \cite[Thm. 2.1]{bitidasca}, we divide the proof into two cases.
\noindent {\bf Case 1:} The set $\{\pi(e_{11}), \pi(e_{12}),
\pi(e_{21})\}$ is linearly independent.
\par
Then $\pi(e_{22}) = \pi(e_{11}) +
a \pi(e_{12}) + b \pi(e_{21})$ for scalars $a,b$.
By comparing $\Delta \pi(e_{22})$ and $(\pi \otimes \pi) \circ \Delta (e_{22})$ one sees that
$ab = -1$ so that there exists $0 \neq b\in \Bbbk$
such that
$\pi(e_{22}) = \pi(e_{11}) + b \pi(e_{12}) -
\frac{1}{b}\pi(e_{21})$. Then it is straightforward to verify
that
the linearly independent elements $h_{1}= \pi(e_{11}) -
\frac{1}{b}\pi(e_{21})$ and
$h_{2} = \pi(e_{11}) + b\pi(e_{12})$ are grouplike.
\par
Suppose that $h_{1} = 1$. Then it is straightforward to show that $ t_{1}=
e_{11} - \frac{1}{b}e_{21} \in \ ^{\operatorname{co} \pi}D$.
Let $s_{1}= e_{22} - be_{12} $ and
note that $s_1,t_1$ are linearly independent.
Then $\pi(s_{1}) =
\pi(e_{22}) - b\pi(e_{12}) =
\pi(e_{11}) + b \pi(e_{12}) - \frac{1}{b}\pi(e_{21})- b\pi(e_{12})=
\pi(e_{11}) - \frac{1}{b}\pi(e_{21}) = \pi(t_{1}) = 1$.
Then a computation similar to that for $t_1$,
shows that $s_{1} \in \ ^{\operatorname{co} \pi}D$. Thus $\dim \ ^{\operatorname{co} \pi}D \geq 2$.
\par
If $h_2 = 1$ then define $t_2 = e_{11} + be_{12}$ and
$s_2 = e_{22} + \frac{1}{b}e_{21}$. A computation
similar to that above shows that $t_2,s_2 \in D^{co\pi}$
so that $D^{co\pi}$ has dimension at least $2$.
\noindent {\bf Case 2:}
The set $ \{\pi(e_{11}),\pi(e_{12}), \pi(e_{22} )\} $
is linearly independent.
Then there exist $a, b \in \Bbbk$ such
that $\pi(e_{21}) = a\pi(e_{11}) + b\pi(e_{12})- a\pi(e_{22})$.
If $a \neq 0$, then the case reduces to Case 1. If $a = 0$, then
$\pi(e_{21}) = b\pi(e_{12})$ and by comparing
$\Delta \pi (e_{21})$ and $\Delta \pi (be_{12})$, we see that
$b=0$ so that $\pi(e_{21})=0$.
Thus
$ \Delta(\pi(e_{11})) = \pi(e_{11})\otimes \pi(e_{11}) $,
$ \Delta(\pi(e_{22})) = \pi(e_{22})\otimes \pi(e_{22}) $ and
$ \Delta(\pi(e_{12})) = \pi(e_{11})\otimes \pi(e_{12}) +
\pi(e_{12})\otimes \pi(e_{22}) $, which implies
that $G(H_{4}) = \langle \pi(e_{11}), \pi(e_{22})\rangle$.
If $ \pi(e_{11}) = 1 $, then $e_{11} \in D^{\operatorname{co} \pi}$,
for
\begin{align*}
(1\otimes \pi) \Delta(e_{11}) =
(1\otimes \pi) (e_{11}\otimes e_{11} + e_{12}\otimes e_{21})
= e_{11}\otimes \pi(e_{11}) + e_{12}\otimes \pi(e_{21}) =
e_{11}\otimes 1.
\end{align*}
Moreover, the element $e_{11} + e_{21} \in D^{\operatorname{co} \pi}$,
since
\begin{align*}
(1\otimes \pi) \Delta(e_{11} + e_{21}) & =
(1\otimes \pi) (e_{11}\otimes e_{11} + e_{12}\otimes e_{21} +
e_{21}\otimes e_{11} + e_{22}\otimes e_{21})\\
& = e_{11}\otimes \pi(e_{11}) + e_{12}\otimes \pi(e_{21})
+e_{21}\otimes \pi(e_{11}) + e_{22}\otimes \pi(e_{21}) =
e_{11}\otimes 1 + e_{21}\otimes 1.
\end{align*}
Thus, $\dim D^{\operatorname{co} \pi} \geq 2$. The case $ \pi(e_{22})=1 $
is completely analogous and implies that $\dim \ ^{\operatorname{co} \pi}D\geq 2$,
taking the elements $e_{22}$ and $e_{22} + e_{21}$.
\end{proof}
Note that if $D$ in Lemma \ref{lema:proj-coalg}
is stable under $S^2$ then the proof can be simplified considerably.
For then we may choose a multiplicative matrix for
$D$ consisting of eigenvectors for $S^2$ and we must have
that $\pi(e_{ii}) \in \Bbbk C_2 \subset H_4$.
\subsection{Hopf algebras generated by a simple subcoalgebra}
\label{subsec:gen-simple}
In this subsection we summarize some known facts about
Hopf algebras generated by simple subcoalgebras of dimension $4$.
The most important is the next proposition,
due to Natale but derived from
a result of \c{S}tefan \cite{stefan}.
\begin{prop}\label{prop:natale-stefan}\cite[Prop. 1.3]{natale}. Suppose that $H$ is
a nonsemisimple Hopf algebra generated by a simple subcoalgebra of
dimension $4$ which is stable by the antipode. Then $H$ fits into a
central exact sequence $\Bbbk^G\overset{\imath}\hookrightarrow
H\overset{\pi}\twoheadrightarrow A,$ where $G$ is a finite group and
$A^*$ is a pointed nonsemisimple Hopf algebra.\qed
\end{prop}
We have the following useful results from \cite{GV}. If $H$ is a Hopf algebra,
${\mathcal
Z}(H)$ denotes its centre.
\begin{lema}\label{truco util}\cite[Lemma 4.2]{GV}
Let $\pi:H\rightarrow K$ be a morphism of
Hopf
algebras such that $\pi(g)=1$ for some $g\in\GH,\,g\neq1$. Suppose
that $H$ is generated by a simple
subcoalgebra of dimension $4$ stable by $L_g$. Then $\pi(H)\subseteq
\Bbbk G(K)$. \smallbreak The same holds with $R_g$ instead of
$L_g$; or with $\adl(g)$ or $\ad_r(g)$ if $g\notin{\mathcal
Z}(H)$.\qed
\end{lema}
\begin{lema}\label{truco util bis}\cite[Lemma 4.3]{GV}
Let $\pi:H\rightarrow K$ be an epimorphism of
Hopf algebras and assume that $K$ is nonsemisimple. Suppose that
$H$ is generated by a
simple subcoalgebra of $H$ of dimension $4$ stable by $S_H^2$.
Then $\operatorname{ord} S_H^2 = \operatorname{ord} S^2_K$.\qed
\end{lema}
\begin{obs}
$(i)$ If $H$ is generated as an algebra by $C\oplus D$ with $S(C) = D$,
then $C$ also generates $H$ as an algebra, since the
sub-bialgebra generated by $C$ is finite dimensional and thus a
sub-Hopf algebra.
\par $(ii)$ Suppose that
$H$ is generated by a
simple subcoalgebra $C$ of dimension $4$ stable by $S_H^2$.
If $H_{4}$ is a sub-Hopf algebra of $H^{*}$, then
$S_{H}^{4} = \id$. For the inclusion $H_{4} \hookrightarrow H^{*}$
induces a Hopf algebra surjection $H\twoheadrightarrow H_{4}$ and by
Lemma \ref{truco util bis} the claim follows.
\end{obs}
We end this section with the following proposition.
\begin{prop}\label{prop:R-skew}\label{pr: HNg 1.3}
Let $ \pi: H \twoheadrightarrow A $ be a Hopf algebra
epimorphism and assume $ \dim H = 2\dim A $. Then
$H^{\operatorname{co} \pi}= \Bbbk\{1,x\}$ with $x$ a $(1,g)$-primitive
element with $g\in G(H)$ and $g^{2}=1$. Moreover,
if
$x$ is nontrivial, then $H$ contains a sub-Hopf algebra isomorphic
to $ H_{4} $. In particular, $4| \dim H$.
\end{prop}
\begin{proof} The statement follows from the proof of
\cite[Prop. 1.3]{hilgemann-ng}. We reproduce
part of the proof. Let $ R=H^{\operatorname{co} \pi} $;
it is known that $R$ is a left coideal subalgebra and
$\dim R=2$. Let $x \in R\smallsetminus \{0\}$ such
that $\varepsilon (x) = 0 $. Then $R = \Bbbk\{1,x\}$ and
$\Delta(x) = a\otimes 1 + b\otimes x$ for some $a,b\in H$.
Since $x \in R$, it follows that $x = a$ and by the
coassociativity of $ \Delta $ we have that $b$ is grouplike.
Thus $x\in P_{1,b}$ and $x$ is a skew-primitive element.
\end{proof}
\subsection{Hopf algebras of dimension $4p$}\label{sec: 4p general}
This section contains a brief overview of what is known for
dimension $4p$. Knowledge of the classification in this dimension is
of course necessary to understand dimension $8p$ which we study in
the last section of this note. Recall that for dimension $12$ the
classification is due to
\cite{andrunatale}, \cite{fukuda}, \cite{natale}.
\par Up to isomorphism, the semisimple Hopf algebras of dimension
$4p$ consist of group algebras and their duals, and also of two
self-dual Hopf algebras, constructed by Gelaki in
\cite{G}, which we will denote by $\mathcal{G}_{1}$ and
$\mathcal{G}_{2}$. Both have group of grouplikes of order $4$ with
$G(\mathcal{G}_{1}) \cong C_4$ and $G(\mathcal{G}_{2}) \cong C_2
\times C_2$.
\subsubsection{Nonsemisimple pointed Hopf algebras of dimension
$ 4p $.}\label{sect: 4p} All pointed Hopf algebras of dimension $4p$
have group of grouplikes isomorphic to $C_{2p}$ and are described in
\cite[A.1]{andrunatale}.
\par In particular, let $\mathcal{A}$ be a pointed Hopf algebra of dimension $ 4p $.
Then, with $g$ denoting a generator of $C_{2p}$, and $\xi$ a primitive $p$th root of unity,
$\mathcal{A}$ is isomorphic to exactly one of the following.
$$
\begin{array}{ll}
\mathcal{A}(-1,0):=&\Bbbk\langle g,x \mid
g^{2p}-1=x^2 =gx+xg= 0\rangle,
\\ \noalign{\smallskip}
&\Delta(g)=g\otimes g,\quad\Delta(x)=x\otimes 1 + g\otimes x.
\end{array}
$$
$$
\begin{array}{ll}
\mathcal{A}(-1,0)^\ast:=&\Bbbk\langle g,x \mid
g^{2p}-1=x^2 =gx+ \xi xg= 0\rangle,
\\ \noalign{\smallskip}
&\Delta(g)=g\otimes g,\quad\Delta(x)=x\otimes 1+g^p\otimes x .
\end{array}
$$
$$
\begin{array}{ll}
\mathcal{A}(-1,1) :=&\Bbbk\langle g,x \mid
g^{2p}-1= x^2 - g^2 + 1 =gx+ xg= 0\rangle,
\\ \noalign{\smallskip}
&\Delta(g)=g\otimes g,\quad\Delta(x)=x\otimes 1+g \otimes x .
\end{array}
$$
$$
\begin{array}{ll}
H_{4} \otimes \Bbbk C_p:=&\Bbbk\langle g,x \mid
g^{2p}-1= x^2 =gx+ xg= 0\rangle,
\\ \noalign{\smallskip}
&\Delta(g)=g\otimes g,\quad\Delta(x)=x\otimes 1+g^p \otimes x .
\end{array}
$$
Note that $H_4 \otimes \Bbbk C_p$ is self-dual. The Hopf algebra $\mathcal{A}(-1,1)$ is a nontrivial lifting
of $\mathcal{A}(-1,0)$ and has nonpointed dual. The nonpointed Hopf algebra $\mathcal{A}(-1,1)^\ast$ contains a copy of the Sweedler Hopf algebra and
as a coalgebra $ \mathcal{A}(-1,1)^\ast \cong H_4 \otimes \mathcal{M}^\ast(2, \Bbbk)^{p-1}$.
The Hopf algebras
$\mathcal{A}(-1,0)$ and $\mathcal{A}(-1,1)$ do not have sub-Hopf algebras isomorphic to $H_4$ but
$\mathcal{A}(-1,0)^\ast$ and $H_4 \otimes \Bbbk C_p$ do.
In all four cases, $S^4 = \id$. In Section \ref{sec:8p} we
will use this notation for these pointed Hopf algebras. \vspace{1mm}
\subsubsection{Nonsemisimple nonpointed Hopf algebras of dimension $ 4p$}
These Hopf algebras were studied
in \cite{ChNg} with the classification completed for $p=3, 5, 7,
11$. The main theorems of \cite{ChNg} are:
\begin{theorem} \label{th: ChNg I} \cite[Theorem I]{ChNg} For $H$ a nonsemisimple Hopf
algebra of dimension $4p$, then $H$ is pointed if and only if
$|G(H)|>2$.
\end{theorem}
\begin{theorem}\label{th: ChNg II}\cite[Theorem II]{ChNg} For $H$
a nonsemisimple Hopf algebra of dimension $4p$ where $p \leq 11$
is an odd prime, then $H$ or $H^\ast$ is pointed.
\end{theorem}
\subsubsection{Applications of counting arguments}
We end this section by applying the preceding preliminary material
to give some simple alternate arguments for known facts about
nonsemisimple nonpointed Hopf
algebras of dimension $4p$.
\begin{prop}\label{prop:dim4p-groupp} Let $H$ be a nonsemisimple
nonpointed Hopf algebra of dimension $4p$. Then
$|\GH| \notin \{ p,2p\}$.
\end{prop}
\begin{proof}
Suppose that $|\GH| = p$. Then $ \Ho = \Bbbk\GH \oplus (\oplus_{i=1}^{t}
D_i^{n_{i}} ) $
where the $D_i$ are simple subcoalgebras of dimension $d_i^2$ with
$1 < d_1 <d_2 < \ldots <d_t$. If $p$ divides some $d_i$, then $\dim
H_0 \geq p + p^2 = p(1+p) \geq 4p$ since $p\geq 3$, a contradiction.
Thus $(p,d_i)=1$ for all $i$ and $p$ must divide $n_i$ by Lemma
\ref{lema:andrunatale}. Then $\dim H_0 \geq p + 4p >4p$, a
contradiction. An analogous proof shows that $|G(H) | \neq 2p$.
\end{proof}
\begin{prop}\label{dim4p-coalg-stable}
Suppose that $H$ is generated as an algebra
by a simple coalgebra $D$ of dimension $4$ which is stable
by the antipode. Then $H^{*}$ is pointed.
\end{prop}
\begin{proof}
By Proposition \ref{prop:natale-stefan}, $H$ fits into a central
exact sequence of Hopf algebras
\begin{displaymath} \Bbbk^{G}
\hookrightarrow H \twoheadrightarrow A,\end{displaymath}
with $A$ nonsemisimple and $A^{*}$ pointed.
Since $\dim A$ divides $4p$, then $\dim A \in \{ 1,2,4,p,2p,4p \}$.
We will show that
$\dim A = 4p$ so that $H \cong A$ and thus
$H^\ast$ is pointed.
\par Since $H$ is nonsemisimple, $\dim A >1$.
If $\dim A = 2, p$ or $2p$, then
$A$ is semisimple by the classification of Hopf algebras of
these dimensions, see \cite{stefan}, \cite{W}, \cite{Z}, \cite{Ng3}.
Since $\Bbbk^{G}$ is semisimple, this would imply that
$H$ is also semisimple, a contradiction.
\par If $\dim A = 4$, then $\dim \Bbbk^{G} = p$
and this implies
that $ \Bbbk^{G} \simeq \Bbbk C_{p}$.
Thus $C_{p} \subseteq \GH$
so that $|\GH| = p$ or $2p$, a contradiction by
Proposition \ref{prop:dim4p-groupp}.
Thus $A = H$ and $H^\ast $ is pointed.
\end{proof}
\section{Hopf algebras of dimension $rpq$} In this section, $H$
will be a Hopf algebra of dimension $rpq$, with $r<p<q$ primes.
Recently, Etingof, Nikshych and Ostrik \cite{eno-08},
finished the classification of the semisimple Hopf algebras of
dimension $rpq$ and $rp^{2}$. Specifically, they prove that all
semisimple Hopf algebras of these dimensions can be obtained as
abelian extensions (Kac Algebras). Then, the classification follows by a
result of Natale \cite{pqq}.
\textbf{Thus, we
will assume that $H$ is nonsemisimple.} One purpose of this section
is to apply counting arguments in the style of D. Fukuda as we did
in \cite{bg}.
\begin{remark}\label{rm: rpq not gen by C}
Recall that by Lemma \ref{lema: dim H mn rel pr} a nonsemisimple
Hopf algebra $H$ of dimension $rpq$ is nonpointed, has no pointed
sub-Hopf algebras and has no pointed
quotient Hopf algebras. In particular, $H$ cannot be generated by a simple
$4$-dimensional subcoalgebra $C$ stable under the antipode. For then
by Proposition \ref{prop:natale-stefan}, $H \cong \Bbbk^G$, which is
semisimple, a contradiction. \qed
\end{remark}
\par Also $H$ cannot have the Chevalley property.
The proof is based
on the proof of \cite[Lemma A.2]{andrunatale}.
\begin{prop}\label{prop:no-chev-rpq}
No nonsemisimple Hopf algebra $H$ of dimension $rpq$ has the
Chevalley property.
\end{prop}
\begin{proof} Suppose that $H$ has the Chevalley property. Then $\dim \Ho
\vert \dim H$ and since $H$ is not pointed or cosemisimple, $1 <
\dim \Ho < \dim H$. Then $\dim \Ho = st$, where $s,t \in \{r, p,q
\}$ and $s< t$. But by \cite{EG}, \cite{So}, \cite{pqq} or if $s= 2$ by \cite{Ng3},
all semisimple Hopf algebras of dimension
$st$ are trivial, \textit{i.e.} isomorphic to a group algebra or the
dual of a group algebra. Hence $\Ho \simeq \Bbbk^{F}$ with $F$ a
non-abelian group of order $st$; in particular, $s\vert (t-1)$.
Consider now the coradical filtration on $H$ and the associated
graded Hopf algebra $\operatorname{gr} H$. Then write $\operatorname{gr} H \simeq R\# \Bbbk^{F}$
with $R$ the \textit{diagram} of $H$. Then $(\operatorname{gr} H)^{*} \simeq
R^{*}\# \Bbbk F$, which implies that $(\operatorname{gr} H)^{*}$ is pointed. This
cannot occur, since $\dim (\operatorname{gr} H)^{*} = \dim \operatorname{gr} H = \dim H = rpq$.
Hence, $\Ho$ cannot be a sub-Hopf algebra. \end{proof}
We note that all Hopf algebras of dimension
$30 = 2 \cdot 3 \cdot 5$ are group algebras
or duals of group algebras by
\cite{fukuda-30} but the classification of the other Hopf algebras
of dimension $rpq$ with $rpq <100$, namely dimensions $42,66,70$ and
$78$ is still open. We make a few observations about these cases.
\begin{obs}\label{rmk:dimrpq-properties} (i) A nonsemisimple Hopf
algebra of dimension $2pq$ cannot have a semisimple sub-Hopf algebra
$A$ of dimension $pq$. For if this were the case, there would be a
Hopf algebra epimorphism $\pi: H^\ast \rightarrow A^\ast$ and we
apply Proposition \ref{pr: HNg 1.3}. Since $H^\ast$ has no
nontrivial skew-primitive elements, $(H^\ast)^{co\pi} = \Bbbk C_2$
and we have an exact sequence of Hopf algebras $ \Bbbk C_2
\hookrightarrow H^\ast \twoheadrightarrow A^\ast$. Thus if $A$, and
thus $A^\ast$, were semisimple, $H^\ast$ and $H$ would be also.
\par (ii) Suppose that $H$ is nonsemisimple of dimension $2pq$
where all Hopf algebras
of dimension $pq$ are semisimple. Then by (i) above, $H$ has no
sub-Hopf algebras of dimension $pq$. Suppose that $|G(H)|
>2$ and let $C$ be a simple subcoalgebra of dimension greater
than $1$. We will show that $C$ generates $H$.
Indeed, let $\mathcal{C} := \langle C \rangle $ be the sub-Hopf algebra generated by
$C$. Then $\dim \mathcal{C} \in \{2p,2q, 2pq \}$. If $\mathcal{C}
\neq H$, then $\mathcal{C} \cong \Bbbk^{\mathbb{D}_m}$ with $m \in \{p,q\}$.
Then $H$ is generated by $\mathcal{C}$ and $\Bbbk G(H)$, so by
Remark \ref{rm: LR2}, $H$ is semisimple, a contradiction.
\end{obs}
\begin{lema} Suppose that $H$ has dimension $2pq$ with $2 <p < q$. Then
\begin{itemize}
\item[(i)] $|G(H)| \neq pq$.
\item[(ii)] If $p \leq 7$ then $|G(H)| \neq 2q$ and if $q \leq 7$ then $|G(H)| \neq 2p$.
\item[(iii)] If $p \leq 5$ then $|G(H)| \neq q$.
\end{itemize}
\end{lema}
\begin{proof}(i) The statement was proved in Remark
\ref{rmk:dimrpq-properties}(i).
\par (ii) If $|G(H)| = 2p$, since for all $d$, by Lemma \ref{lema:andrunatale}, $2p$
divides $\dim H_{0,d}= nd^2$ for some $n \geq 1$ then
$\dim
H_0 \geq 2p + 4p = 6p$. Then by Proposition \ref{prop:biti-dasca}(ii) and Lemma \ref{lema:andrunatale},
$\dim H \geq
6p + 2p(5) + 4p = 20p$, a contradiction if $q \leq 7$.
If $|G| = 2q$ and $p \leq 7$ the argument is the same.
\par (iii) Assume $|G(H)| =q$.
For $G, {\mathcal D}$ as above, $\dim H_0 \geq q + 4q = 5q$,
$2\dim P_1^{G,{\mathcal D}}\geq 4q$, $\dim P^{G,G}$ and
$\dim P^{{\mathcal D},{\mathcal D}}$ must be divisible
by $q$ and so $\dim H \geq 11q > 2(5)q$, a contradiction.
\end{proof}
\begin{cor}\label{cor: not order of gplikes}
\begin{enumerate}
\item[(i)] If $ \dim H =42$,
then $|G(H) | \notin \{21, 14,7,6 \}$.
\item[(ii)] If $\dim H = 70$, then $|G(H)| \notin \{ 35,14,10,7 \}$.
\item[(iii)] If $\dim H
= 66$, then $|G(H) | \notin \{ 33, 22,11\}$.
\item[(iv)] If $\dim H = 78$, then $|G(H) |
\notin \{ 39,26, 13 \}$. \qed
\end{enumerate}
\end{cor}
Next we show that for Hopf algebras of dimension $66$, $G(H)$ does
not have order $6$.
\begin{lema} If $\dim H = 6p$ with $p < 13 $, then $|G(H)| \neq
6$.
\end{lema}
\begin{proof} First we suppose that $H$ has a simple subcoalgebra of
dimension $4$ and consider various cases. Let $G:=G(H)$, the group
of grouplikes of $H$ of order $6$, and let ${\mathcal D}$ denote the set of
simple subcoalgebras of dimension $4$.
\par (i) Suppose that $H_0 = \Bbbk G \oplus {\mathcal M}^\ast(2,\Bbbk)^3$ so
that $\dim H_0 = 18$. Since, by Remarks \ref{rm: rpq not gen by C}
and \ref{rmk:dimrpq-properties},
no $D \in {\mathcal D}$ is stable by the antipode, then no $D \in {\mathcal D}$ can be
fixed by $S^2$ either. Thus if $P_1^{1,D}$ is nondegenerate, so are
$P_1^{1,S^2(D)}$, $P_1^{S(D),1}$ and $P_1^{S^3(D)=D,1}$ and $ 2 \dim
P_1^{G,{\mathcal D}} \geq 2(6)4 = 48$. Since $P^{G,G}$ has nonzero dimension
divisible by $6$ and $P^{{\mathcal D},{\mathcal D}}$ has nonzero dimension divisible by
$12$, then the dimension of $H$ is at least $18 + 48 + 6 + 12 = 84$,
a contradiction.
\par (ii) Suppose that $H_0 = \Bbbk G \oplus {\mathcal M}^\ast(2,\Bbbk)^{3t}$ with
$t \geq 2$ so that $\dim H_0 = 6 + 4(3t) \geq 30$. Since for some integers
$\ell,m, n \geq 1 $, $2 \dim
P^{G,{\mathcal D}} = 24 \ell$, $\dim P^{G,G} = 6m$, $\dim P^{{\mathcal D},{\mathcal D}} =
12n$, then $\dim H \geq 72$, so that we obtain a
contradiction if $p <13$.
\par (iii) Suppose that
$H_0 = \Bbbk G \oplus {\mathcal M}^\ast(2,\Bbbk)^{3t}
\oplus E_1 \oplus \ldots \oplus E_N $ where $t,N \geq 1$ and
the $E_i$ are
simple subcoalgebras of dimension greater than $4$. Let
$\mathcal{E}$ denote the set of $E_i$ and $\mathcal{D}$ the set of
simple subcoalgebras of dimension $4$. Then $\dim H_0 \geq 6 + 12 +
18 = 36$. If $P^{G,\mathcal{E}} \neq 0$, then $2 \dim
P^{G,\mathcal{E}} \geq 2(6)(3) = 36$, $\dim
P^{\mathcal{E},\mathcal{E}} \geq 9$ and so $\dim H \geq 81$,
contradiction. Thus $P^{G,{\mathcal D}} \neq 0$. If $t=1$, then as in (i)
above $2 \dim P_1^{G,{\mathcal D}} \geq 2(6)4 = 48$, so that $\dim H \geq 36 +
48 = 84$, a contradiction. If $t \geq 2$, then $\dim H_0 \geq 48$
and
$2 \dim P_1^{G,{\mathcal D}} \geq 24$ so that $\dim H_1 \geq 72$.
But $P_2^{{\mathcal D},{\mathcal D}}$, $P_2^{G,G} $ are
nondegenerate, so that $\dim H_2 \geq 80$,
a contradiction.
Now suppose that $H$ has no simple subcoalgebras of
dimension $4$ and $H_0 = \Bbbk G \oplus E_1 \oplus \ldots \oplus E_t$ where the
$E_i$ are simple subcoalgebras of dimension at least $9$
so that $\dim H_0 \geq 6 + 18 =
24$. Let $\mathcal{E}$ denote the set of simple coalgebras $E_i$.
Then $2 \dim P_1^{G,\mathcal{E}} \geq 2(6)(3) = 36$, $\dim P^{G,G}
\geq 6$, $\dim P^{\mathcal{E},\mathcal{E}} \geq 9$ and must be
divisible by $6$ so that $\dim P^{\mathcal{E},\mathcal{E}} \geq 12$.
But also $\dim P^{\mathcal{E},\mathcal{E}} $ must be a sum of
squares larger than $4$ so that $\dim P^{\mathcal{E},\mathcal{E}}
>12$. Thus $\dim H > 24 + 36 + 6 + 12 = 78$, a contradiction.
\end{proof}
Note that in the proof above, the only place where $p \neq 13$ was
used was in case (ii). There if $p=13$ we must have that $\ell=n=1$
and $m=2$.
\par Next we show that for dimension $70$ the group of grouplikes must
have order $1$ or $2$.
\begin{lema} \label{lem:dimH70Gn5}
If $\dim H = 70$ then $G(H) \ncong C_5$.
\end{lema}
\begin{proof} Again, we suppose first that $H$ has a simple
subcoalgebra of dimension $4$ and consider various cases. Let ${\mathcal D}$
denote the set of simple subcoalgebras of dimension $4$ and let
$G:= G(H) \cong C_5$.
\par (i) Suppose that $H_0 = \Bbbk C_5 \oplus D_1 \oplus \ldots \oplus
D_5$ where $D_i \cong {\mathcal M}^\ast(2, \Bbbk)$.
Since no $D_i$ is stable under the antipode,
we may assume that $S(D_i) = D_{i+1}$,
subscripts modulo $5$. For if $S^2(D_1) = D_1$, then $S$ would
permute $D_3,D_4,D_5$. But by Corollary \ref{cor: not order of
gplikes}, $|G(H^\ast)| \in \{ 1,2,5 \}$ and so $3$ does not divide
the order of $S$. Thus by Proposition \ref{prop:biti-dasca}(i),
$P_1^{1,D_i}$ is nondegenerate for all $i$
and $2 \dim P^{G,{\mathcal D}} \geq 2(5)(10) = 100$, a contradiction.
\par (ii) Suppose that $H_0 =
{\Bbbk C_5 \oplus } \mathcal{M}^\ast(2, \Bbbk)^{5t}$ where $t>1$. Then $\dim
H_0 \geq 5 + 10(4) = 45$, $2 \dim P^{G,{\mathcal D}} \geq 2(5)(2) = 20$,
$\dim P^{G,G} \geq 5$, and $\dim P^{{\mathcal D},{\mathcal D}} \geq 4$, so that $\dim H
\geq 74$.
\par (iii) Let $H_0 = \Bbbk C_5 \oplus {\mathcal M}^\ast(2, \Bbbk)^{5t} \oplus
E$, where $t \geq 1$ and $0 \neq E $ is a sum of
simple subcoalgebras $E_i$ of dimension
greater than $4$. Let $\mathcal{E}$ denote the set of $E_i$. If the
dimensions of any of the $E_i$ are relatively prime to $5$, then $\dim
H_0 \geq 5 + 20 + 5(9) = 70$, a contradiction.
The only remaining case is $H_0 = \Bbbk C_5 \oplus {\mathcal M}^\ast(2,
\Bbbk)^{5}\oplus {\mathcal M}^\ast(5,\Bbbk)$; here $t=1$ or else $H=H_0$.
Then $\dim H_0 = 50$, $2 \dim P_1^{G,{\mathcal D}} \geq 2(5)(2) = 20$ and this
is a contradiction since $H \neq H_1$.
\par Thus $H$ cannot have a simple subcoalgebra of dimension $4$.
The only other possibilities for $H_0$ are $H_0 = \Bbbk C_5 \oplus
{\mathcal M}^\ast(3, \Bbbk)^5$ and $H_0 =\Bbbk C_5 \oplus {\mathcal M}^\ast(5,
\Bbbk)^t$ with $t=1,2$. In the first case, $\dim H_0 = 50$, and for
$\mathcal{E}$ the set of simple subcoalgebras of dimension $9$,
$2 \dim P^{G, \mathcal{E}} \geq 2(5)(3) = 30$, a contradiction. In
the second case, first let $t=1$ and here let $\mathcal{E}$ be the
set of simple subcoalgebras of dimension $25$. Then $\dim H_0 =30$
and $2 \dim P^{G,\mathcal{E}} \geq 2(5)(5) = 50$, a
contradiction. The proof for $t=2$ is the same.
\end{proof}
\begin{cor} If $H$ is a nonsemisimple Hopf algebra of dimension
$70$, then $|G(H)|=1,2$. \qed
\end{cor}
\begin{obs}\label{rem: not order of gplikes1}
(i) To summarize, we have that for $H$ of dimension $42, 66 $,
$|G(H)| \in \{ 1,2,3 \}$, for $H$ of dimension $70$, $|G(H)| \in \{
1,2 \}$ and for $H$ of dimension $78$, $|G(H)| \in \{1,2,3,6 \}$.
\par (ii) If $ \dim H=42$ and $G(H) \cong C_3$, then dimension arguments
such as those above show that $H$ has the following form: $H_0
\cong \Bbbk C_3 \oplus C$ with $C \cong \mathcal{M}^\ast(3,\Bbbk)$,
$2\dim P^{G,C} = 18$, $\dim P^{G,G} = 3$, $\dim P^{C,C} = 9$.
\end{obs}
\section{Hopf algebras of dimension $8p$}\label{sec:8p}
In this section we prove some results for Hopf algebras of dimension $8p$.
\subsection{Hopf algebras of dimension $8$}\label{subsect: 8}
The structure of Hopf algebras of dimension $8$ or dimension $4p$
naturally plays a role in
the classification of Hopf algebras of dimension $8p$. Hopf
algebras of dimension $4p$ were discussed in Section \ref{sect:
4p}, including a complete description of the pointed ones.
For dimension $8$ the semisimple Hopf algebras are group
algebras, duals of group algebras or the noncommutative noncocommutative
semisimple Hopf algebra of dimension $8$, denoted by
$A_8$ \cite{ma-6-8}. This Hopf algebra is self-dual and $G(A_8)
\cong C_2 \times C_2$; it is constructed as an extension of $\Bbbk
[C_2 \times C_2]$ by $\Bbbk C_2$. All other Hopf algebras of
dimension $8$ are pointed
or copointed.
\par Let $\xi$ be a primitive $4^{th}$ root of 1. By \cite{stefan},
every pointed nonsemisimple Hopf algebra of dimension $8$ is
isomorphic to exactly one of the Hopf algebras listed below:
$$
\begin{array}{ll}
{\mathcal A}_2:=&\Bbbk\langle g,x,y\mid
g^2-1=x^2=y^2=gx+xg=gy+yg=xy+yx=0\rangle,
\\ \noalign{\smallskip}
&\Delta(g)=g\otimes g,\quad\Delta(x)=x\otimes 1+g\otimes x,\quad\Delta(y)=y\otimes 1+g\otimes y.
\end{array}
$$
$$
\begin{array}{l}
{\mathcal A}'_4:=\Bbbk\langle g,x\mid g^4-1=x^2=gx+xg=0\rangle,
\\ \noalign{\smallskip}
\hspace{2cm}\Delta(g)=g\otimes g,\quad\Delta(x)=x\otimes 1+g\otimes x;
\\ \noalign{\vspace{.3cm}}\end{array}
$$
$$
\begin{array}{l}
{\mathcal A}''_4:=\Bbbk\langle g,x\mid g^4-1=x^2-g^2+1=gx+xg=0\rangle,
\\ \noalign{\smallskip}
\hspace{2cm}\Delta(g)=g\otimes g,\quad\Delta(x)=x\otimes 1+g\otimes x;
\\ \noalign{\vspace{.3cm}}\end{array}
$$
$$
\begin{array}{l}
{\mathcal A}'''_{4,\xi}:=\Bbbk\langle g,x\mid g^4-1=x^2=gx-\xi xg=0\rangle,
\\ \noalign{\smallskip}
\hspace{2cm}\Delta(g)=g\otimes g,\quad\Delta(x)=x\otimes 1 + g^2\otimes x;
\\ \noalign{\vspace{.3cm}}\end{array}
$$
$$
\begin{array}{l}
{\mathcal A}_{2,2}:=\Bbbk\langle g,h,x\mid g^2=h^2=1, \,
x^2=gx+xg=hx+xh=gh-hg=0\rangle,
\\ \noalign{\smallskip}
\hspace{2cm}\Delta(g)=g\otimes g,\quad\Delta(h)=h\otimes h,\quad\Delta(x)=x\otimes 1 + g\otimes
x.
\end{array}
$$
Except for $ {\mathcal A}''_4$, these pointed Hopf algebras
have pointed duals. We have
the following isomorphisms:
${\mathcal
A}_2\simeq({\mathcal A}_2)^*$, ${\mathcal
A}'''_{4,\xi}\simeq{\mathcal A}'''_{4,-\xi}\simeq({\mathcal A}'_4)^*$
and ${\mathcal A}_{2,2}\simeq({\mathcal A}_{2,2})^*$ \cite{stefan}.
Moreover, one can check case by case
that ${\mathcal A}_2 $, ${\mathcal A}'''_{4,\xi} $
and ${\mathcal A}_{2,2} $ have sub-Hopf algebras isomorphic to
$H_{4}$ and ${\mathcal A}'_4, {\mathcal A}''_4$ do not.
\par Let $\mathcal{K} = (\mathcal{A}''_{4})^{*}$.
Up to isomorphism $ \mathcal{K} $
is the only Hopf algebra of dimension $8$ which
is neither semisimple nor pointed. The next remark is essentially \cite[Lemma 3.3]{GV}.
\begin{obs}\label{lemma:desc-A}
(i) $\mathcal{K}$ is generated as an algebra by the elements $a,b,c,d$
satisfying the relations
\begin{align*}
ab & = \xi ba & ac & = \xi ca & 0& =cb=bc & cd & = \xi dc&
bd & = \xi db\\
ad & = da & ad &=1
& 0& =b^{2}=c^{2} & a^{2}c & = b & a^{4} & =1
\end{align*}
\par (ii) The elements $a=e_{11},b=e_{12},c=e_{21},d=e_{22}$
form a matrix-like basis and
$$
\Delta(a^{2}) = a^{2}\otimes a^{2}\text{ and }
\Delta(ac) = ac\otimes a^{2} + 1\otimes ac.
$$
\par (iii) $\mathcal{K} \simeq H_{4}\oplus \mathcal M^{\ast}(2,\Bbbk)$
as coalgebras.
\end{obs}
Using Remark \ref{lemma:desc-A}, one sees
that $\mathcal{K}$ is a finite dimensional quotient of the quantum
group ${\mathcal O}_{\xi}(SL_{2})$; this is consistent with
Proposition \ref{prop:natale-stefan}.
\subsection{Nonsemisimple Hopf algebras of
dimension $ 8p $}
Throughout this section, unless otherwise stated, we will assume
that $H$ is a nonsemisimple nonpointed non-copointed Hopf algebra of dimension $8p$.
Also recall that $p$ denotes an odd prime. Our strategy will
be to study the possible orders for the grouplikes in $H$ where
$\dim H = 8p$. In this section we prove Theorem \ref{thm:8p}.
\subsubsection{Group of grouplikes divisible by $p$}
In this subsection we concentrate on general results for
Hopf algebras of dimension $8p$ with $|G(H)|$ divisible by
$p$.
\begin{prop}
\label{pr: dim not p,8p,4p}
$|G(H)| \neq 8p, 4p $ or $p$.
\end{prop}
\begin{proof}
For $H$ non-cosemisimple, $|G(H)| \neq 8p$. If $|G(H) | = 4p$,
since $H$ is not pointed, $H_0 = \Bbbk G(H) \oplus E$ with
$E$ the sum of simple coalgebras of dimension bigger than 1. Since
$4p$ must divide $\dim(E)$ by Lemma \ref{lema:andrunatale},
then we must have $H=H_0$, impossible because
$H$ is not cosemisimple.
\par If $|G(H)|=p$, then $H$ has no nontrivial skew-primitives
by Lemma \ref{lema: dim H mn rel pr}. Now
we use counting arguments as in the previous sections. Suppose that $H_0 =
\Bbbk G(H) \oplus D_1^{s_1} \oplus \ldots \oplus D_t^{s_t} $ for $D_i$ simple of
dimension $n_i^2$ and $ 2 \leq n_1 < \ldots < n_{t}$. Let ${\mathcal D}$ denote the set
of simple coalgebras $D_i$. Then by Proposition
\ref{prop:biti-dasca}(i) and Lemma \ref{lema:fukuda}(ii), $2 \dim P^{C_p, {\mathcal D}}
\geq 2pn_1 $. If $p$ divides $n_1$, then $\dim H \geq \dim H_0 + 2\dim P^{C_p,{\mathcal D}}
\geq p + p^2 + 2p^2 = p(1 + 3p) >8p$ since $p \geq 3$.
If $(p,n_1) = 1$, then $p $ divides $s_1$ and $\dim H \geq p + 4p + 4p >8p$.
Thus in each case, we arrive at a contradiction.
\end{proof}
Thus, if $|G(H)| = 2p$, then $H$ cannot have the Chevalley property.
For,
suppose that $H_0$ is a sub-Hopf algebra of $H$. Since $H$ is not pointed or cosemisimple,
$\dim H_0 = 4p$. Since the semisimple Hopf algebras $\mathcal{G}_i$ have grouplikes of order $4$,
then $H_0 \cong \Bbbk^\Gamma$ where $\Gamma$ is a nonabelian group of order $4p$. But then $H_0 =
\Bbbk^ \Gamma \cong \Bbbk G(H_0) \oplus D$ where $|G(H_0)| = 2p$ and $D$ is a sum of simple coalgebras.
By Lemma \ref{lema:andrunatale}, $D$ is a sum of matrix coalgebras of dimension $d>1$ and $2p = nd^2$
which is impossible.
\begin{remark}\label{rm: 3 5 7} As in the proof above,
we use Lemma \ref{lema: dim H mn rel pr}
together with counting arguments to eliminate
the possibility that $|G(H)| =8$ for some small dimensions.
Let $\dim H = 8p$ with $p \in \{3,5,7 \}$ and suppose $|G(H)| = 8$.
By Lemma \ref{lema: dim H mn rel pr},
$H$ has no nontrivial skew
primitive elements. Since $\dim H_0 = 8 + 8m$ for some
integer $m \geq 1$, by Lemma \ref{cor:bitidasca-p1}(ii), we have that
$\dim H \geq 16 + 40 + 4 = 60$.
\end{remark}
The next proposition shows that type $(8, 2p)$ is impossible.
\begin{prop}\label{prop:group-8-p}
If $\vert G(H)\vert=2p$ then $ H^{*} $ has no
semisimple sub-Hopf algebra $L$ of dimension $8$.
\end{prop}
\begin{proof} Suppose $H^{*}$ contains a semisimple sub-Hopf algebra $L$ of dimension $8$ and
let $\Gamma$ be a subgroup of $ G(H) $ of order $p$. Since $L^\ast$
is semisimple and has no grouplike elements of order $p$, $ \Bbbk
\Gamma \hookrightarrow H \twoheadrightarrow L^\ast$ is an exact sequence
of Hopf algebras. This implies
that $H$ is semisimple, a contradiction.
\end{proof}
The next proposition determines the coalgebra structure of $H$ when
$|G(H)| = 2p$.
\begin{prop}\label{pr: grouplikes 2p}
Suppose $|G(H)| = 2p$.
\begin{enumerate}
\item[(i)]
$H$ contains a pointed
sub-Hopf algebra $\mathcal{A}$ of dimension
$4p$ and as a coalgebra $H \cong \mathcal{A} \oplus \mathcal{M}^\ast
(2,\Bbbk)^p$.
\item[(ii)] If $H^\ast$ is generated by a simple subcoalgebra
of dimension $4$ fixed by $S_{H^\ast}^4$ then
$S_H$ has order $4$.
\end{enumerate}
\end{prop}
\begin{proof}(i) Since $H$ is not pointed,
$H_0 = \Bbbk G(H) \oplus D_1 \oplus
\ldots \oplus D_t$ where the $D_i$ are
simple coalgebras of dimension greater than $1$.
Suppose that $H_{0, mp} \neq 0$ where $m \geq 1$.
Then ${\rm dim}(H_{0,mp}) \geq 2p^2$ and thus ${\rm dim}(H_0) \geq
2p + 2p^2 = p(2 + 2p) \geq 8p$ since $p \geq 3$, and this is
impossible since $H$ is nonsemisimple. If $H_{0,d} \neq 0$ for
$(d,p) = 1$ and $d
>2$ then ${\rm dim}(H_0) \geq 2p + pd^2 = p(2+d^2) > 8p$ which is
also impossible. Thus $D_i =
\mathcal{M}^\ast(2, \Bbbk)$ for all $i$, and
$H_0 \cong \Bbbk G(H) \oplus \mathcal{M}^\ast(2, \Bbbk)^p$ as
coalgebras.
By Proposition \ref{prop:biti-dasca}(ii), $H$ has a nontrivial skew-primitive
$x$ and $x$ together
with $G(H)$ generates a pointed sub-Hopf algebra $\mathcal{A}$ of $H$ of
dimension $4p$ and (i) is proved.
(ii) By (i) there is a Hopf algebra projection
$\pi: H^\ast \rightarrow \mathcal{A}^\ast$ for $\mathcal{A}$
the pointed Hopf algebra of dimension
$4p$ from (i).
Then $S_{\mathcal{A}}$ and $S_{\mathcal{A}^\ast}$ have order $4$.
Suppose $D \cong \mathcal{M}^\ast(2,\Bbbk) \subset H^\ast$
is stable under $S_{H^\ast}^4$ and generates $H^\ast$, and suppose
that $S_{H^\ast}^4$ has order $N>1$. Let $\mathbf{e}$
be a multiplicative matrix for $D$ as in
Theorem \ref{thm:stefan} such that $S_{H^\ast}^4(e_{ij})
= \omega^{i-j}e_{ij}$ where $\omega$ is a primitive $N^{th}$
root of unity. Then if $i \neq j$, $\pi(e_{ij}) = 0$ and thus
$\dim \pi(D) <3$. By Theorem \ref{thm:2x2-matrix},
$\pi(D) \subseteq G(\mathcal{A}^\ast)$ so that $\pi(D)$
does not generate $\mathcal{A}^\ast$, contradicting
the fact that $D$ generates $H^\ast$.
\end{proof}
Next we show that if $|G(H)| = 2p$, then $H^{*}$ cannot
contain a copy of the Sweedler Hopf algebra.
\begin{prop}\label{prop:2p-sweedler}
Assume $ |G(H)| = 2p$.
Then $ H^{\ast}$ has no sub-Hopf algebra isomorphic to $H_4$.
\end{prop}
\begin{proof}
If $H^*$ contains a sub-Hopf algebra
isomorphic to $H_{4}$, there exists a Hopf algebra
epimorphism $\pi: H \to H_{4}$. Then,
by Lemma \ref{lm: dim H co dim B = dim H},
$\dim H^{\operatorname{co} \pi} = \dim\ ^{\operatorname{co} \pi}H = 2p$.
Let $G(H) = \langle c \rangle \cong C_{2p}$
and let $\Gamma = \langle c^2 \rangle \cong C_p$.
Since $p$ is odd, we have that
$\Bbbk \Gamma$ is included both in $H^{\operatorname{co}\pi}$ and $^{\operatorname{co}\pi}H $.
On the other hand,
Proposition \ref{pr: grouplikes 2p} implies that $H \simeq
\mathcal{A} \oplus D$ where $D = D_{1}
\oplus \cdots \oplus D_{p}$, with $D_{j} \simeq
\mathcal{M}^\ast(2,\Bbbk)$, for
all $1\leq j\leq p$. We will prove that for every $j$, $1 \leq j \leq p$,
$\dim D_{j}^{\operatorname{co} \pi}\geq 2$ or $\dim\ ^{\operatorname{co}\pi}D_{j} \geq 2$.
This fact leads to a contradiction. Indeed, suppose that for $n$ of the $D_j$,
$\dim D_{j}^{\operatorname{co} \pi}\geq 2$ and for the remaining $p-n$ coalgebras
$D_j$, $\dim\ ^{\operatorname{co}\pi}D_{j} \geq 2$.
Since $p$ is odd, either $2n >p$ or $2(p-n) >p$ so
that either $\dim ^{co\pi}D >p$ or $\dim D^{co\pi}>p$.
Since $\Bbbk \langle c^2 \rangle $ lies in both the left
and right coinvariants, this implies
that either $\dim ^{co\pi}H >2p$ or $\dim H^{co\pi} > 2p$,
and this gives the desired
contradiction.
Fix a simple subcoalgebra $D_{j}$ and let $K=\langle D_{j}\rangle$,
the sub-Hopf algebra of $H$ generated by $D_{j}$.
Clearly, $\dim K = 8, 2p, 4p$ or $8p$.
We write $\pi$ also for $\pi|_K$ when the meaning is clear.
If $\pi$ maps $K$ onto $H_4$, then the result
follows from Lemma \ref{lema:proj-coalg}; in particular, $ \dim K \neq 8p $.
If $\pi(K) = \Bbbk$, then $\pi|_K = \varepsilon_K$.
Hence for ${\bf d} = (d_{ij})$ a multiplicative matrix for $D_j$,
$\pi(d_{ij}) = \delta_{ij}$ and $D_j$ lies in both the
left and right coinvariants. It remains to consider
the case when $\pi(K) = \Bbbk G(H_4)= \Bbbk \langle g \rangle$
where $g$ generates $G(H_4) \simeq C_{2} $.
Assume $\dim K = 8$. Since $K$ is nonpointed, by
Subsection \ref{subsect: 8} we have that
$K = \mathcal{K} = (\mathcal{A}^{\prime \prime}_4)^\ast \cong L \oplus D_j$
as coalgebras
where $L \cong H_4$. Since $G(L) \subset G(H)$, we have that $c^p \in K$.
Suppose $\pi(c^p) =1$. Since $\pi(c)$ is a grouplike element and $|G(H_{4})|=2$,
we have that $\pi(c) = 1$. If $x$ is a nontrivial
skew-primitive in $H_4 \subset K$ such that $\pi(x) = 0$,
then $c,x$ lie in both $^{co\pi}H$ and $H^{co\pi}$,
contradicting the fact that the dimension of the coinvariants is $2p$.
Thus $\pi(c^p) = \pi(c) = g$, $^{co\pi}L = \Bbbk\{1, gx \}$, $L^{co\pi} = \Bbbk\{ 1, x \}$.
Since $\dim {^{co\pi}K} = \dim K^{co\pi} = 4$,
then we must have that $\dim {^{co\pi}D_j} = \dim D_j^{co\pi} = 2$.
Next we will show that if $\dim \pi(K) =2$ then $K$ cannot have
dimension $4p$ or $2p$. Suppose that $\dim K = 4p$. Then $\dim
K^{co\pi} = 2p = \dim {^{co\pi}K}$ so that $K^{co\pi} = H^{co\pi}$
and the same for the left coinvariants. Thus $\Bbbk \langle
c^2\rangle \cong \Bbbk C_{ p} \subset K$. If $K$ is nonpointed
semisimple, by the classification of semisimple Hopf algebras of
dimension $4p$ in Section \ref{sec: 4p general}, $p$ does not divide
the order of $G(K)$ either if $K$ is the dual of a group algebra or
if $K$ is one of the semisimple Hopf algebras in \cite{G}. If $K$ is
not semisimple then by Theorem \ref{th: ChNg I}, $K$ is pointed, a
contradiction.
Finally, suppose now that $\dim K = 2p$ so that $K \cong
\Bbbk^{\mathbb{D}_{p}}$ and $\dim K^{co\pi} = p$. Let $\tilde{K} =
\langle K, \Gamma\rangle$ be the sub-Hopf algebra of $H$ generated
by $K$ and $\Bbbk \langle c^2 \rangle$. Since $\tilde{K}$ is
semisimple, then $\tilde{K} \neq H$ and so has dimension $4p$. But
$\tilde{K}$ is then a nonpointed semisimple sub-Hopf algebra of $H$
of dimension $4p$ with a grouplike of order $p$,
and this is impossible by the proof in the paragraph above.
\end{proof}
The next proposition shows that type $(2p,r)$ can occur only if $r=2,4$.
\begin{prop}\label{prop:2p-exact}\label{pr: not 2p 2p}
Suppose $ |G(H)| =2p $. Then
\begin{enumerate}
\item[(i)] $ H $ fits into an exact sequence
of Hopf algebras
$ \mathcal{A} \hookrightarrow H \twoheadrightarrow \Bbbk C_{2}$,
where $ \mathcal{A} $ is a pointed Hopf algebra of dimension
$ 4p $.
\item[(ii)] If $ \mathcal{A}^{*} $ is nonpointed, i.e., $\mathcal{A} \cong \mathcal{A}(-1,1)$, then
$\dim H^\ast_0 = 8p-4$,
$G(H^{*})\cong C_4$ and $H^\ast$ has a sub-Hopf algebra
isomorphic to $\mathcal{A}_4^{\prime \prime}$.
\item[(iii)]If $\mathcal{A}^\ast$ is pointed then
$\dim H^\ast_0 = 4p$ and $|G(H^\ast)|$ is $2$ or $4$. If $H^\ast$ has a nontrivial
skew-primitive element, then $H^\ast$ has a sub-Hopf algebra isomorphic to $\mathcal{A}_4^{\prime \prime}$.
\end{enumerate}
\end{prop}
\begin{proof}
(i) Proposition \ref{pr: grouplikes 2p} implies that
$ H\simeq \mathcal{A} \oplus \mathcal{M}^{\ast}(2,\Bbbk)^{p} $,
with $\mathcal{A}$ a pointed Hopf algebra of dimension $4p$.
Dualizing this inclusion we get a Hopf algebra epimorphism
$\pi: H^{*}\twoheadrightarrow \mathcal{A}^{*}$
and $\dim H = 2\dim \mathcal{A}$. Thus by Proposition
\ref{prop:R-skew}, $R:= (H^\ast)^{co\pi} = \Bbbk\{1,x\}$ with $x$
a (possibly trivial) $(1, g)$-primitive
element for some grouplike $g\in G(H^{*})$ with $g^2 =1$. If $x$ is a
nontrivial skew-primitive, then by Proposition \ref{prop:R-skew},
$H^\ast$ has a sub-Hopf algebra isomorphic to $H_4$ and this is
impossible by Proposition \ref{prop:2p-sweedler}.
Thus $x \in \Bbbk G(H^{*})$ and $R$ is a Hopf algebra
isomorphic to the group algebra $ \Bbbk C_{2} $. In particular,
$H$ fits into the exact sequence of Hopf algebras
$\mathcal{A} \hookrightarrow H \twoheadrightarrow \Bbbk C_{2}$.
(ii)
Suppose now that $\mathcal{A}^{*}$ is nonpointed.
Recall from Subsection \ref{sect: 4p} that then
$\mathcal{A} \cong \mathcal{A}(-1,1)$ and
$ \mathcal{A}^{*}\simeq H_{4} \oplus
\mathcal{M}^{\ast}(2,\Bbbk)^{p-1} $ as
coalgebras. Hence $\dim (\mathcal{A}^{*})_{0} = 4p-2$ and by Proposition
\ref{prop:exact-dim-cor}, $\dim (H^{*})_{0} = 8p-4$. Thus
$ H^{*} $ contains a nontrivial skew-primitive
element, since otherwise Proposition
\ref{prop:biti-dasca} gives a contradiction.
Thus $|G(H^\ast)| >1$. Since
$ \dim H^{*}- \dim (H^{*})_{0}=4 $ is divisible
by $|G(H^\ast)|$ we have that $ |G(H^\ast)|$ is $2$ or $ 4$.
But if $G(H^\ast) \cong C_2$ or if
$G(H^\ast) \cong C_2 \times C_2$,
then $H^\ast$ would contain
a sub-Hopf algebra isomorphic to
$H_4$, and this is impossible by Proposition
\ref{prop:2p-sweedler}.
\par Thus $H^\ast$ has a pointed sub-Hopf algebra $L$ with $G(L)\cong C_4$
and so $L$ has dimension $8$.
Then there is a Hopf algebra epimorphism $\rho: H \rightarrow L^\ast$ and $H^{co \rho} \cong \Bbbk C_p$ so that
we have an exact sequence of Hopf algebras
$
\Bbbk C_p \hookrightarrow H \twoheadrightarrow L^\ast,
$ and dualizing we obtain the exact sequence
$L \hookrightarrow H^\ast \twoheadrightarrow \Bbbk C_p
$. By Proposition \ref{prop:exact-dim-cor}, $ 6p = \dim H_0 = p \dim L^\ast_0 $.
Thus we must have that $\dim L^\ast_0 = 6$
and $L^\ast $ cannot be pointed. We must have that $L^\ast \cong \mathcal{K}$
and $L \cong \mathcal{A}_4^{\prime \prime}$.
(iii) Now suppose that $\mathcal{A}^\ast$ is pointed so that
$G(\mathcal{A}^\ast) \cong C_{ 2p}$ by
Subsection \ref{sect: 4p}. Then
again using Proposition \ref{prop:exact-dim-cor} we have that
$\dim H^\ast_0 = 4p$. If $|G(H^\ast)| \neq 2,4$,
since $H^\ast$ has a grouplike of order $2$,
by Proposition \ref{pr: dim not p,8p,4p} and Proposition \ref{prop:group-8-p},
$|G(H^\ast)|$ must be $2p$.
Then $H^\ast_0 = \Bbbk C_{2p} \oplus E$
where $\dim E = 2p$ and $E$ is a sum of
simple subcoalgebras
of dimension greater than $1$.
No simple subcoalgebra can have dimension
divisible by $p$ since $p^2 > 2p$.
But if a simple subcoalgebra has dimension
$d^2$ with $1 < d $ and $(d,p)=1$ then $H^\ast_0$ must contain at least
$p$ such simple coalgebras and $d^2p>2p$,
a contradiction.
If $H^\ast$ has a nontrivial skew-primitive
element then the same argument as in (ii) above shows that $H^\ast$ has
a sub-Hopf algebra isomorphic to $\mathcal{A}_4^{\prime \prime}$.
\end{proof}
\begin{cor}\label{cor: 3 5 C4} If
$|G(H)| = 2p$ with $p=3$ or $ 5$,
then $H^\ast$ has a sub-Hopf algebra isomorphic to $\mathcal{A}_4^{\prime \prime}$.
\end{cor}
\begin{proof}
It suffices to show that $H^\ast$ has a nontrivial skew-primitive element
and then the statement follows from Proposition \ref{pr: not 2p 2p}.
We may assume that we are in case (iii) of Proposition \ref{pr: not 2p 2p},
so that
$\dim H^\ast_0 = 4p$.
Let $p=3$. By Proposition \ref{prop:biti-dasca}, if $H^\ast$ has
no skew-primitive, then for $|G(H^\ast)| \geq 2$,
$24 = \dim H^\ast \geq 12 + 2(5) + 4 = 26$,
a contradiction.
\par If $p = 5$, and $ |G(H^\ast)|= 2$, then $\dim H^\ast_0 = 20$
forces $H^\ast_0 \cong \Bbbk C_2 \oplus \mathcal{M}^\ast(3,\Bbbk)^2$.
Then if $H^\ast$ has no nontrivial skew-primitive,
Proposition \ref{prop:biti-dasca} implies that
$40 \geq 20 + 2(7) +9 = 43$, a contradiction. If $p = 5$ and
$|G(H^\ast)| = 4$, then Proposition \ref{prop:biti-dasca}
implies that $40 \geq 20 + 4(5) + 4 = 44 $, again a contradiction.
\end{proof}
Now we can give the proof of Theorem A.
\bigbreak \noindent {\bf Proof of Theorem A.} Let $ H $ be a
nonsemisimple Hopf algebra of dimension $ 8p $. By Proposition
\ref{pr: dim not p,8p,4p} we have that $ |G(H) | \in \{1,2,4,8, 2p
\}$. If $ |G(H) |=2p$, then by Proposition \ref{pr: not 2p 2p} we
have that $2 \leq |G(H^\ast)| \leq 4$ and the theorem is proved.
\qed
\subsubsection{Further results for some specific primes}
In this section, we improve the results of Theorem A for some
specific primes $p$.
\begin{prop}\label{prop:fixed under left}
Suppose $|G(H)|=2p$, $G(H^\ast) \cong C_4 = \langle g \rangle$ and
$H^\ast$ contains a simple subcoalgebra $D$ of dimension $4$.
Also assume that $(H^\ast)_0$ is not a sub-Hopf algebra of $H^\ast$,
i.e., $H^\ast$ does not have the Chevalley property. Then
\begin{enumerate}
\item[(i)] $D$ generates $H^\ast$ as a Hopf algebra;
\item[(ii)] $D$ is not fixed by $L_{g^2}$, $R_{g^2}$, i.e.,
by left or right multiplication by $g^2$. If $g^2 \notin Z(H^\ast)$
then $D$ is also not fixed by the adjoint action of $g^2$.
\end{enumerate}
\end{prop}
\begin{proof}
(i) Let $L = \langle D \rangle$
be the sub-Hopf algebra of $H^\ast$ generated by $D $, then $L$ is
a nonpointed Hopf algebra of dimension $8, 2p,4p$ or $8p$.
We will show that each dimension except $8p$
is impossible.
Suppose the dimension of $L$ is $8$.
Then, by Section \ref{subsect: 8},
either $G(L) \cong C_2 \times C_2$, impossible since $G(H^\ast) \cong C_4$
or else $L$ contains a copy of $H_4$,
impossible by Proposition \ref{prop:2p-sweedler}.
Suppose the dimension of $L$ is $2p$ so that $L \cong
\Bbbk^{\mathbb{D}_p}$. Let ${\mathcal L} = \langle L, \Bbbk \langle g \rangle
\rangle$ be the semisimple sub-Hopf algebra of $H^{*}$ generated by
$L$ and by $g$, a generator of $G(H^\ast)$. Then the dimension of
${\mathcal L}$ is divisible by $2p$ and by $4$ so it must be $4p$. Suppose
that ${\mathcal L} \cong \mathcal{G}_i$, $i=0,1$, one of the self-dual
semisimple Hopf algebras of dimension $4p$ from \cite{G}.
Then the inclusion
${\mathcal L} \hookrightarrow H^\ast$ and the fact that $\mathcal{G}_i$ is
self-dual gives a Hopf algebra projection $\pi$ from $H $ onto $
\mathcal{G}_i$. Denote by $h$ the generator of $G(H)$. Since
$G(\mathcal{G}_i)$ has order $4$, we have that $\pi(h^{2n}) = 1$ so
that $h^{2n} \in H^{co\pi}$ for $ 0 \leq n \leq p-1$. This
contradicts Lemma \ref{lm: dim H co dim B = dim H} which states
that the dimension of $H^{co\pi}$ is $2$. Finally suppose that ${\mathcal L}
\cong \Bbbk^\Gamma$ for $\Gamma $ a nonabelian group of order $4p$.
Then there is a Hopf algebra projection $\pi$ from $H$ onto $\Bbbk
\Gamma$ and by Proposition \ref{prop:R-skew}, $H^{ co\pi} = \Bbbk
\{1,x \}$ where $0 \neq x$ is $(1,h^p)$-primitive. If $x$ is a
trivial skew-primitive, i.e., $x = 1 - h^p$ then $H^{ co\pi} \cong
\Bbbk C_2$. But then by Lemma \ref{lm: dim H co dim B = dim H}, the
sequence
$ \Bbbk C_2 \overset{\imath}\hookrightarrow
H\overset{\pi}\twoheadrightarrow \Bbbk \Gamma $ is exact so that $H$
is semisimple, a contradiction. Thus
$x$ is nontrivial and $H$ has a sub-Hopf algebra isomorphic to $H_4$.
This means that the pointed sub-Hopf algebra $\mathcal{A}$
of $H$ of dimension
$4p$ guaranteed by Proposition \ref{pr: grouplikes 2p}
is either $\mathcal{A}(-1,0)^\ast$ or is $H_4 \otimes \Bbbk C_p$.
In either case the dual is pointed and so we are in Case (iii)
of Proposition \ref{pr: not 2p 2p}. Then $\dim H^\ast_0 =4p$
and so ${\mathcal L} = H^\ast_0$. Since we assumed that
$H^\ast$ does not have the Chevalley property, this is a contradiction.
Suppose the dimension of $L$ is $4p$. By its construction $L$ is not
pointed. Also $L$ cannot be semisimple by the arguments in the
case above where $\dim {\mathcal L} = 4p$. If $L$ is copointed then $L \cong
\mathcal{A}(-1,1)^\ast \cong H_{4} \oplus \mathcal{M}^\ast(2,\Bbbk)^{p-1}$, which
is impossible since $H^\ast$ does not contain a copy of $H_{4}$.
Thus both ${L}$ and ${L}^\ast$ are nonsemisimple, nonpointed so that by
Theorem \ref{th: ChNg I},
$|G(L)| ,|G(L^{*})|\leq 2$.
But since $L$ is a sub-Hopf algebra of $H^{*}$, we have a Hopf
algebra epimorphism $\pi: H\twoheadrightarrow L^{*}$ with $\dim
H^{\operatorname{co} \pi} = 2$. This implies that $\pi(c^{2}) \neq 1$ and
consequently $p\leq|G(L^{*})|\leq 2 $, a contradiction. Thus, this
case is also impossible and we have proved (i), namely that $L=H$.
(ii) Now suppose that $g^2D = D$; if $D$ is stable under $R_{g^2}$
or $\ad_\ell(g^2)$ with $g^2 \notin Z(H^\ast)$, the argument is the same.
Let $\mathcal{A} \subset H$ be the $4p$-dimensional
pointed sub-Hopf algebra of $H$
from Proposition \ref{pr: grouplikes 2p};
there is a Hopf algebra epimorphism $\pi: H^\ast \rightarrow \mathcal{A}^\ast$.
If $\mathcal{A}^\ast$ is pointed, then $G(\mathcal{A}^\ast) \cong C_{2p}$,
otherwise $G(\mathcal{A}^\ast) \cong C_2$. In either case, $\pi(g^2)
= 1$. Then Lemma \ref{truco util} implies that $\pi(H^\ast)
\subseteq \Bbbk G(\mathcal{A}^\ast)$, and this contradiction
finishes the proof.
\end{proof}
\begin{cor}\label{cor:2p-4-4p}
Assume $|G(H)| = 2p$ with
$p = 3,7,11$ and suppose that the $4p$-dimensional pointed sub-Hopf
algebra $\mathcal{A}$ of $H$ from Proposition \ref{pr: grouplikes 2p}
has pointed dual. If $H^\ast$ does not have the Chevalley property,
then $G(H^\ast) \ncong C_4$.
\end{cor}
\begin{proof}Suppose that $G(H^\ast) = \langle g \rangle \cong C_4$.
With the notation of Proposition \ref{prop:2p-exact},
the assumption that $\mathcal{A}^\ast$ is pointed means
that we are in Case (iii) so that $\dim H_0^\ast = 4p$.
With notation as in Proposition \ref{prop:fixed under left},
it remains to show that for $p=3,7,11$, then $H^\ast$ has a
simple subcoalgebra of dimension $4$ stable
under $L_{g^2}$ and that will give a contradiction.
If $p=3$, $H^\ast_0 \cong \Bbbk C_4
\oplus \mathcal{M}^\ast(2,\Bbbk)^2$ so since the order of $g$ is
$4$, the statement is clear.
If $p=7$ then either $H^\ast_0 \cong \Bbbk C_4 \oplus
\mathcal{M}^\ast(4,\Bbbk)
\oplus \mathcal{M}^\ast(2,\Bbbk)^2$ or else $H^\ast_0 \cong \Bbbk C_4
\oplus \mathcal{M}^\ast(2,\Bbbk)^6 $ and in either case, the statement follows.
If $p=11$ then $H^\ast_0 \cong \Bbbk C_4 \oplus D$ where $D$ is one of
the following: $\mathcal{M}^\ast(2,\Bbbk)^{10}$ or
$\mathcal{M}^\ast(3,\Bbbk)^4 \oplus \mathcal{M}^\ast(2,\Bbbk)$ or
$\mathcal{M}^\ast(4,\Bbbk) \oplus E$ where $E$
has dimension $24$. Then $E \cong \mathcal{M}^\ast(2,\Bbbk)^6$
or $E \cong \mathcal{M}^\ast(2,\Bbbk)^2 \oplus \mathcal{M}^\ast(4,\Bbbk)$.
In any case, $H^\ast$ has a simple $4$-dimensional
subcoalgebra stable under $L_{g^2}$.
\end{proof}
\begin{cor}\label{cor: 24 type 6,4 corad} Let $\dim H = 24$ and suppose $H$ is of type $(6,4)$
and $H^\ast$ does not have
the Chevalley property.
Then $H$ fits into an exact sequence
$\mathcal{A}(-1,1) \hookrightarrow H \twoheadrightarrow \Bbbk C_2$,
in other words, we are in Case (ii) of
Proposition \ref{pr: not 2p 2p}. Then we have that either
$H_0^\ast \cong \Bbbk C_4 \oplus \mathcal{M}^\ast(2,\Bbbk)^4$
or else $H_0^\ast \cong \Bbbk C_4 \oplus \mathcal{M}^\ast(4,\Bbbk)$.
\end{cor}
\begin{proof}
The statement follows from Corollary \ref{cor: 3 5 C4}
and Corollary \ref{cor:2p-4-4p}.
\end{proof}
\subsection{Generalizations of results of Cheng and Ng}
In this section we generalize some results of \cite{ChNg} to study
Hopf algebras of dimension $8p$ with group
of grouplikes of order $2^i$. We assume throughout this section that
$H$ is nonsemisimple, nonpointed, non-copointed
and has dimension $8p$.
The following propositions are similar to
\cite[Prop. 3.2]{ChNg}.
\begin{prop}\label{pr:boson-2-4}\label{pr:4-4-boson}
\begin{enumerate}
\item[(i)] If $H$ contains a pointed sub-Hopf algebra $K$ of dimension $8$,
then $G(H) = G(K)$.
\item[(ii)] Assume $H\simeq R\# K$ where $K$ is
pointed and copointed of dimension $8$,
and $R$ is a braided Hopf algebra of dimension $p$ in $ \ydk $. Then
$G(H) \cong G( H^{*}) $ so that $H$ is of type $(4,4)$ or type
$(2,2)$.
\item[(iii)] Suppose that $|G(H)| = 2^t$ for $t \in \{1,2,3 \}$
and suppose that $H^\ast$ contains
a sub-Hopf algebra $L$ of dimension $8$ so that there is a Hopf algebra
epimorphism $\pi: H \rightarrow L^\ast$. Then $\pi$ is an injective
Hopf algebra map from $\Bbbk G(H)$ to $\Bbbk G(L^\ast)$.
\item[(iv)] Suppose that $H$ contains a pointed
sub-Hopf algebra $K$ of dimension $8$
with $|G(K)| = 4$ and $H^\ast$ contains
a pointed sub-Hopf algebra $L$ of dimension $8$.
Then $K \cong L^\ast$ and $H \cong R \#K$ where $R$ is a braided Hopf algebra in
$^K_K\mathcal{YD}$ of dimension $p$.
\end{enumerate}
\end{prop}
\begin{proof} (i) Suppose $H$ has a grouplike
element $g$ such that $g
\notin G(K)$. Then $\langle g, K \rangle$, the sub-Hopf algebra of
$H$ generated by $g$ and $K$, is pointed and has dimension greater
than $8$ and divisible by $8$, so must be all of $H$. This is a
contradiction since $H$ is not pointed.
(ii) Assume $H\simeq R\# K$ with $K$ and $K^{*}$ pointed. By (i),
$G(H) = G(K)$. Since $H^{*}\simeq R^{*}\# K^{*}$, and
$K^\ast$ is pointed by assumption, then again by (i), $G(H^\ast) =
G(K^\ast)$. By Section \ref{subsect: 8}, since $K$ is pointed and
copointed, then $G(K) \cong G(K^\ast)$ and thus $G(H) \cong
G(H^\ast)$.
(iii) Dualizing the inclusion $L \subset H^\ast$,
we get a Hopf algebra
epimorphism $\pi: H \longrightarrow L^\ast$.
Since $\dim L^\ast = 8$, $\dim R = p$ where
$R = H^{co \pi}$ is the algebra of coinvariants.
Suppose that $\pi(g) = 1$ for some $g \in G(H)$
and let $\Gamma = \langle g\rangle$. Then
$\Bbbk \Gamma \subset R$ and $R$ is
a left $(H, \Bbbk \Gamma)$-Hopf module
where the left action of $\Bbbk \Gamma$ on $R$
is left multiplication. Then by the Nichols-Zoeller
theorem, $R$ is a free $\Bbbk \Gamma$-module
which is impossible unless $\Gamma = \{1 \}$.
Thus $\pi$ is an injective Hopf algebra
map on $\Bbbk G(H)$ as claimed.
\par (iv)
Let $\pi: H \longrightarrow L^\ast$ and $R = H^{co \pi}$
as in the proof of (iii). Let $x$ be a nontrivial
$(g, 1)$-primitive in $K$. We wish to show that $\pi(x)$ is a nontrivial skew-primitive in $L^\ast$ and then
$\pi$ will be an isomorphism from $K$ to $L^\ast$, proving the statement.
\par By (i), $G(H) = G(K)$ and $G(H^\ast) = G(L)$. By (iii), since $|G(H)|=4$, $|G(L^\ast)| \geq 4$, and since $L$
is pointed, by the description of the duals of pointed Hopf algebras of dimension $8$ in Section \ref{subsect: 8}, $L^\ast$
must also be pointed. Again, by Section \ref{subsect: 8}, $G(L) \cong G(L^\ast)$.
Let $G$ denote $G(H) \cong G(K) \cong G(L) \cong G(L^\ast) \cong G(H^\ast)$.
\par By (iii), $\pi(x)$ is $(g,1)$-primitive. Suppose that $\pi(K) \subseteq \Bbbk G \subset L^\ast$.
Then $\pi(x) = \lambda(g-1)$ with $\lambda \in \Bbbk$.
But this implies that $\pi(x^2) = \lambda^{2}(g^2 -2g +1)$, which is only possible if $\lambda = 0$ since
$x^2 = 0$ or $x^2 = g^2 -1$. Thus $\Bbbk\{ 1,x \} \subset R = H^{co\pi}$. On the other hand, if
$V$ denotes the vector space with basis $\{ hx^i \mid h \in G(H), h \neq 1, i = 0,1 \}$, then $V \cap R = \{ 0 \}$.
Since $\dim R =p$,
there is some $0 \neq z \in R$ such that $z \notin K$. Then $ \langle K,z \rangle$, the sub-Hopf algebra
generated by $K$ and $z$, has dimension greater than $8$ and divisible by $8$ so is all of $H$.
By Lemma \ref{lm: on R pi=epsilon}, $\pi(z) \in \Bbbk$. Thus $\pi(H) \subseteq \Bbbk G$, a contradiction, and so
$\pi(x)$ is a nontrivial skew-primitive in $L^\ast$.
\end{proof}
\begin{cor} \label{cor: 4.18} Suppose that $H$ is of type $(4,4)$
and $H,H^\ast$ each
have a nontrivial skew-primitive element. Then $H \cong R \# K$ where
$K,K^\ast$ are pointed Hopf algebras
of dimension $8$ and $R$ is a braided Hopf algebra in $_K^K\mathcal{YD}$
of dimension $p$.
\end{cor}
\begin{proof} By Proposition \ref{pr:4-4-boson}(iv),
it remains only to show that $H$, $H^\ast$ have
pointed sub-Hopf algebras of dimension $8$.
Let $K = \langle G(H),x \rangle$, the sub-Hopf algebra of $H$ generated by $G(H)$ and a
nontrivial skew-primitive element. Then $\dim K <8p$ and
is divisible by $4$ so is either $8$ or $4p$.
Since
all pointed Hopf algebras of dimension $4p$ have group of
grouplikes of order $2p$ (see Section \ref{sect: 4p}), $\dim K = 8$. Similarly $H^\ast$ has a pointed sub-Hopf
algebra of dimension $8$.
\end{proof}
The following proposition follows the proof of
\cite[Thm. 3.1]{ChNg}.
\begin{prop}\label{pr:R-semisimple}
Let $K$ be a Hopf algebra and
$R$ be a braided Hopf algebra in $ \ydk $ of odd dimension.
If the order of the antipode in the bosonization $R\#K$ is a power
of $ 2 $, then $R$ and $R^{*}$ are semisimple.
\end{prop}
\begin{proof}
Let $H = R\#K$ be the Radford biproduct or bosonization
of $R$ with $K$. As $R$ is stable by $ \cS_{H}^2 $,
by \cite[Thm. 7.3]{andrussch}
it suffices to prove that $\Tr(\cS_{H}^{2}|_{R}) \neq 0$. Clearly, the
order of $ \cS_{H}^{2}|_{R} $ divides the order of $ \cS_{H}^{2}
$ and hence is a power of $ 2 $. If $\Tr(\cS_{H}^{2}|_{R}) = 0 $, then
by \cite[Lemma 1.4]{Ng}
$\dim R$ is even, a contradiction. Thus $R$ is semisimple.
The same proof holds for $R^{*}$ since $H^{*} \simeq R^{*}\# K^{*}$.
\end{proof}
Recall that
$H$ nonsemisimple of
dimension $24$
with $|G(H)|=4$ has a nontrivial skew-primitive element by
Proposition
\ref{prop:biti-dasca}. Then Corollary \ref{cor: 4.18} and Proposition \ref{pr:R-semisimple}
imply the next statement.
\begin{cor}\label{cor:24-4-4-1}
Suppose $\dim H =24$ and $H$ is of type $(4,4)$.
Then $H\simeq R\# K$ with $K$ and
$ K^{*} $ pointed Hopf algebras of dimension $ 8 $ and
$R$ a semisimple Hopf algebra in $ \ydk $ of dimension $ 3 $.\qed
\end{cor}
The following lemmata generalize results of Cheng and Ng
used to study $H_4$-module algebras, in particular \cite[3.4,3.5]{ChNg}.
\begin{lema}\label{lem:xe=0}
Let $K$ be a pointed Hopf algebra generated by grouplikes and skew-primitives,
and
let $ A $ be a finite dimensional left $K$-module algebra. If $A$ is
a semisimple algebra and $ e $ is a central idempotent of $ A $ such
that the two-sided ideal $I=Ae$ is stable by the action of $G(K)$,
then $I$ is a $K$-submodule of $ A $ with $g\cdot e = e$ for all $g\in G(K)$ and $x\cdot
e = 0$ for any skew-primitive element $x$.
\end{lema}
\begin{proof}
Write $e=e_{1} + \cdots + e_{t}$ as a sum of orthogonal primitive
central idempotents. Since $I$ is stable under the action of $G(K)$, then
$G(K)$ permutes the primitive idempotents $ e_{1},\ldots ,e_{t} $ and
hence $g\cdot e = e$ for all $g\in G(K)$.
Let $x$
be a $(1,g)$-primitive. Then $x\cdot e = x\cdot e^{2} = (x\cdot e)e
+ (g\cdot e)(x\cdot e) = 2(x\cdot e)e$. Thus $x \cdot e \in I$ so that
$ x \cdot e = (x \cdot e ) e $ and then $x \cdot e = 2 (x \cdot e)$ implying that $x\cdot e=0$.
Moreover, since $x\cdot (ae) = (x\cdot a)e$ for all $a\in A$, it
follows that $I$ is stable under the action of $x$ and since $K$ is
generated by grouplikes and skew-primitives, $I$ is a $K$-submodule
of $A$. \end{proof}
\begin{lema}\label{lem:x-R=0}
Let $K$ be a pointed Hopf algebra with
abelian group of grouplikes. Let $ A $
be a semisimple braided Hopf algebra in $\ydk$.
If $I$ is a one-dimensional ideal of $A$, then $x\cdot I =0$ for all
skew-primitive elements $x$ of $K$.
\end{lema}
\begin{proof}
Let $x$ be a $(1,g)$-primitive element of $ K $ and
denote by $\overline{K}$ the pointed sub-Hopf
algebra of $K$ generated by $x$ and $g$. Note that since
$G(K)$ is abelian, then $g x g^{-1} = \chi(g) x = \omega x$
for some character $\chi$ of $G(K)$ and $1 \neq \omega$ an $N$-th
root of unity with $N= \operatorname{ord} g$.
Since $A$ is semisimple, $I = Ae_{1} = \Bbbk e_{1}$
for some
central primitive idempotent. Thus we need to prove that
$x\cdot e_{1} =0$. If $g\cdot e_{1}=e_{1}$, then the result follows from
Lemma \ref{lem:xe=0} using $ \overline{K} $ instead of $ K $.
Assume $g\cdot e_{1} \neq e_{1}$ and let $e_{1},\ldots ,e_{t}$ be
representatives of the set $\{g^{i}\cdot e_{1}\}_{0\leq i<N}$
of primitive central
idempotents of $ A $; in
particular $t$ divides $N$ and $g\cdot e_{t}=e_{1}$.
Let $e = e_{1}+\cdots +e_{t}$, then $\overline{I} = Ae$ is
a two-sided ideal of $ A $ which is stable under the action of
$\Gamma = \langle g\rangle$. Hence, by Lemma \ref{lem:xe=0}
we have that $x\cdot e = 0$.
Since $\Delta(x) = x\otimes 1 + g\otimes x$, we have that
$x\cdot e_{i} = x\cdot e_{i}^2 = \alpha_{i,i} e_{i} + \alpha_{i,i+1} e_{i+1}$
and $x\cdot e_{t} = \alpha_{t,t} e_{t} + \alpha_{t,1} e_{1}$
for some $\alpha_{ij}\in \Bbbk$. Using
that $x\cdot e =0$ we get that
$\alpha_{i-1,i} + \alpha_{i,i} =0 $ and $\alpha_{t,1} + \alpha_{1,1}=0$
for all $2\leq i \leq t$.
On the other hand, using that $gxg^{-1} = \omega x$ we obtain
that $\omega \alpha_{i,i} = \alpha_{i-1,i-1}$ for all $2\leq i \leq t$,
$\omega \alpha_{i,i+1} = \alpha_{i-1,i}$ for all $2\leq i \leq t-1$
and $\omega \alpha_{1,1} = \alpha_{t,t}$,
$\omega \alpha_{1,2} = \alpha_{t,1}$ and
$\omega \alpha_{t,1} = \alpha_{t-1,t}$.
Hence
$\alpha_{1,2} = \omega^{-1}\alpha_{t,1} = -
\omega^{-1}\alpha_{1,1}$ and $x\cdot e_{1} =
\alpha_{1,1}(e_{1} - \omega^{-1}e_{2})$.
\par Denote by $\lambda_{A}$ the right integral of $ A $. Then
by \cite[Thm. 5.8, Rmk. 5.9]{FMS},
see also \cite[Eq. (3.4)]{ChNg},
for $k \in K$, $a \in A$, we have
$\lambda_{A}(k\cdot a) = \eps_{K}(k) \lambda_{A}(a)$.
Then $g\cdot e_{1} =e_{2}$ implies that
$\lambda_{A}(e_{1})= \lambda_{A}(e_{2})$ and consequently
$$0= \eps_{K}(x)\lambda_{A}(e_{1})=
\lambda_{A}(x\cdot e_{1}) =
\alpha_{1,1}(\lambda_{A}(e_{1})-\omega^{-1}\lambda_{A}(e_{2}))
=\alpha_{1,1}(1-\omega^{-1})\lambda_{A}(e_{1}).$$
This implies that $\alpha_{1,1}=0$, since $\omega^{-1}\neq 1$
and $\lambda_{A}(e_{1})\neq 0$ because the kernel
of a right integral does not contain any nontrivial
ideal. Hence $x\cdot e_{1}=0$ and the lemma is proved.
\end{proof}
\begin{prop}\label{pr: boson implies chevalley}
Suppose that $H \cong R \#K$ where $K$ is a pointed Hopf algebra of
dimension $8$, and $R$ is a
Hopf algebra of dimension $p$ in $_K^K\mathcal{YD}$ such that $x \cdot R =0$ for
some $(1,g)$ primitive $x \in K$, $x \cdot R$ being the adjoint action of $x$ on $R$.
\begin{enumerate}
\item[(i)] If $ |G(H) | =4$, suppose furthermore that $K$ is copointed and the condition above holds for
$R^\ast$ and $K^\ast$, i.e., $y \cdot R^\ast =0$ for some nontrivial $(1,h)$-primitive $y$ in $K^\ast$.
Then $G(H) \cong C_2 \times C_2$,
$K \cong \mathcal{A}_{2,2}$ in the notation of
Section \ref{subsect: 8} and
$H$ and $H^\ast$ have the Chevalley property.
\item[(ii)] If $|G(H)| = 2$ then there is a Hopf algebra epimorphism $\pi: H \rightarrow A$
where $A$ is a Hopf algebra of dimension $4p$ which is nonsemisimple, nonpointed and non-copointed. Thus if $p \leq 11$, this
situation cannot occur.
\end{enumerate}
\end{prop}
\begin{proof} We note that by Proposition \ref{pr:4-4-boson}(i), $G(K) = G(H)$.
\par (i)
Let $J$ be the Hopf ideal of $H$ generated by $x$. Since
$x \cdot R = 0$, then
$xR = -gRx\subseteq Hx$ and so $J = Hx$. As a $\Bbbk$-space,
$J = \operatorname{Span}\{r_ihx \mid r_1, \ldots, r_p \text{ a basis for } R,\, h \in G(H) \}$ and so the dimension
of $J$ is at most $4p$. Then $\dim H/J \geq 4p$ and divides $8p$ since
$(H/J)^\ast$ is isomorphic to
a sub-Hopf algebra of $H^\ast$. Thus $\dim J = \dim H/J = 4p$.
Then there is a Hopf algebra
epimorphism $\pi: H \rightarrow A$ where $A:= H/J$ is a Hopf
algebra of dimension $4p$ and $H^{co \pi} = \Bbbk\{1,x \}$.
By Proposition \ref{prop:R-skew}, $g^2 =1$ so that $K \cong \mathcal{A}_{4,\xi}^{\prime \prime}$ if $G(H) \cong C_4$
and $K \cong \mathcal{A}_{2,2}$ if $G(H) \cong C_2 \times C_2$.
\par If $H \cong R \# \mathcal{A}_{4,\xi}^{\prime \prime}$,
consider $H^\ast \cong R^\ast \# \mathcal{A}^\prime_4$.
Now the same arguments applied to $H^\ast$ give a contradiction and
so this case is impossible.
\par Since $\pi: H \rightarrow A:=H/J$ is
injective on $\Bbbk G(H)$, $4$ divides $|G(A)|$. Thus, by Theorem \ref{th: ChNg I},
if $A$ is not semisimple,
$A$ is pointed.
But every pointed Hopf algebra of dimension $4p$ has group of grouplikes of order
$2p$ which is not divisible by $4$,
so $A$ must be
semisimple.
\par Since $\mathcal{A}_{2,2}$ is self-dual,
we have $H^\ast \cong R^\ast \# \mathcal{A}_{2,2}$.
The same argument as for $H$ then gives us
a Hopf algebra epimorphism from $H^\ast$ to a
semisimple Hopf algebra $B$ of dimension $4p$ with coinvariants
$\{1,y\}$ where $y$ is $(1,h)$-primitive.
Then $B^\ast$ is isomorphic to a sub-Hopf algebra of $H$, call it $L$.
Since $L$ is cosemisimple, $L
\subseteq H_0$ and we wish to show equality.
Since $L$ has dimension $4p$, the sub-Hopf algebra $\langle L,x \rangle$ of $H$ generated by $L$
and $x$ is all of $H$.
Since $\pi(x) = 0$, this means that by dimensions $\pi$
is injective on $L$ and so $\pi: L \cong A$ is a Hopf algebra isomorphism. This
implies that $H \cong S \# A$ where $S = \Bbbk\{1,x \}$ is a braided Hopf algebra in
$_A^A\mathcal{YD}$ and thus $H$ has
the Chevalley property. Reversing the roles of $H^\ast$ and $H$ in
the above argument we get that $H^\ast$ also has the Chevalley property.
\par (ii) Now suppose that $|G(K)|= 2$ and $K \cong \mathcal{A}_2$.
Then $G(K) = \langle g \rangle$ and $K$ is generated by $g$ and
two $(1,g)$-primitives, $x$ and $x^\prime$. Let $J$ be the Hopf ideal of $H$
generated by $x$ and as in (i),
$J = Hx $. Thus as a $\Bbbk$-space,
$J = \operatorname{Span}\{r_ig^jz \mid r_1, \ldots, r_p \text{ a basis for }
R,\, j=0,1,\, z \in \{x, x^\prime x \} \}$. Thus $\dim J \leq 4p$
so that $\dim H/J \geq 4p$ and is a divisor of $8p$ so $\dim H/J =4p$
and as above there is a Hopf algebra
epimorphism $\pi: H \rightarrow A$ where $A:= H/J$ is a Hopf algebra
of dimension $4p$ and $H^{co \pi} = \Bbbk\{1,x \}$.
Since $\pi(x^\prime), \pi(g)$ generate a sub-Hopf algebra of $A$ isomorphic to
$H_4$, then $A$ is not semisimple. If $A^\ast$
is pointed, then $H^\ast$ has grouplikes of order $2p$. This is a contradiction since
$H^\ast \cong R^\ast \# K^\ast$ with $K^\ast \cong \mathcal{A}_2$, so that
by Proposition \ref{pr:4-4-boson}(i), $G(H^\ast) = G(\mathcal{A}_2) \cong C_2$.
Suppose that $A$ is pointed. Since $A^\ast$ is not pointed, then
$A \cong \mathcal{A}(-1,1)$ in the notation of
Section \ref{sect: 4p}. But this is impossible since $\mathcal{A}(-1,1)$ has no
sub-Hopf algebra isomorphic to $H_4$. By
Theorem \ref{th: ChNg II}, for $p\leq 11$, $A$ is either semisimple,
pointed or copointed.
\end{proof}
\begin{cor}\label{cor: R comm ss}
Suppose $H \cong R \# K$ where $K$ is a pointed Hopf algebra of dimension $8$, and $R$ is commutative and
semisimple.
\par (i) If $|G(H)|=2$, then there is a Hopf algebra map $\pi$ from $H$ onto a Hopf algebra $A$ of dimension $4p$
which is nonsemisimple, nonpointed and non-copointed.
\par (ii) If $|G(H)| =4$ and furthermore $K$ is copointed and $R^\ast$ is commutative and semisimple,
then $G(H) \cong C_2 \times C_2$ and $H$
and $H^\ast$ have the Chevalley property.
\end{cor}
\begin{proof}
It remains only to show that under the given conditions there is a $(1,g)$-primitive $x$ such that $x \cdot R =0$.
Since $R$ is semisimple commutative, $R$ can be written as the sum of one-dimensional simple ideals $Re_i$ with
$e_i$ a central primitive idempotent. Now apply Lemma \ref{lem:x-R=0} and Proposition
\ref{pr: boson implies chevalley}.
\end{proof}
\begin{cor}\label{cor: type 4,4 dim 24}
If $\dim H=24$ and $H$ has type $(4,4)$, then $H$ and $H^\ast$ have the Chevalley property.
\end{cor}
\begin{proof}
By Corollary \ref{cor:24-4-4-1}, $H \cong R \#K$ where $K$,$K^\ast$
are pointed Hopf algebras of dimension $8$, $R$ is a semisimple braided Hopf algebra in $^K_K\mathcal{YD}$
of dimension $3$, and $R^\ast$ is a semisimple braided Hopf algebra in $^{K^\ast}_{K^\ast}\mathcal{YD}$ of dimension $3$.
Since all simple representations of $R$ and $R^\ast$ must be one-dimensional, $R,R^\ast$ are commutative and
the result follows from Corollary \ref{cor: R comm ss}.
\end{proof}
\begin{remark} Suppose that $H$ is of type $(2^i,2^j)$, has dimension $24$ and $H \cong R \# K$ where $K$ is pointed of dimension $8$. Then $|G(H)| \neq 2$. For by Proposition \ref{pr:R-semisimple}, $R$ and $R^\ast$ are semisimple and thus, since both have
dimension $3$, they are commutative also. Then the conditions of Proposition \ref{pr: boson implies chevalley} hold.
\end{remark}
The next remark summarizes the results
proved for various particular dimensions $8p$, with $H$ nonsemisimple, nonpointed,
non-copointed as assumed throughout this section.
\begin{remark}\label{rm: summary}
\begin{enumerate}
\item[(i)] From Remark \ref{rm: 3 5 7}, if $\dim H = 24, 40, 56$, then $|G(H)| \neq 8$.
\item[(ii)] From Corollary \ref{cor: 3 5 C4}, if $\dim H = 24,40 $, then type $(2p,2)$ is impossible and
for type $(2p,4)$, $G(H^\ast) \cong C_4$.
\item[(iii)] From Corollary \ref{cor:2p-4-4p}, if $p=3,7,11$, $|G(H)| = 2p$, $H^\ast$ does not have the Chevalley property, and $H$ does not contain a copy of $\mathcal{A}(-1,1)$, i.e.,
we are in Case (iii) of Proposition \ref{prop:2p-exact}, then $G(H^\ast) \ncong C_4$.
\item[(iv)] From Corollary \ref{cor: 24 type 6,4 corad} if $\dim H = 24$ and $H$ has type $(6,4)$ then if $H^\ast$ does not have the Chevalley property,
then $H$ has a sub-Hopf algebra isomorphic to $\mathcal{A}(-1,1)$, $\dim H^\ast_0 =20$ and as coalgebras, either $H^\ast \cong \mathcal{A}_4^{\prime \prime } \oplus \mathcal{M}^\ast(2, \Bbbk)^4$ or $H^\ast \cong \mathcal{A}_4^{\prime \prime } \oplus \mathcal{M}^\ast(4, \Bbbk)$.
\item[(v)] By Corollary \ref{cor: type 4,4 dim 24}, if $\dim H = 24$ and $H$ does not have the Chevalley property, then $H$ is not of type $(4,4)$.
\end{enumerate}
\end{remark}
\subsection{Hopf algebras of dimension $24$}
In this subsection we specialize to the case of $p=3$, $\dim H = 24$. Unless otherwise stated, throughout this section
$H$ will denote a Hopf algebra without the Chevalley property.
\par Our first result is a general statement for all Hopf algebras of dimension $8p$ and will need the
following remark about
nonabelian groups of order $4p$.
\begin{remark}\label{rm: DF}
Suppose that $L$ is a nonabelian group of order $4p$, $p$ an odd
prime. Then unless $p=3$ and $L = \mathbb{A}_4$, $L$ has a normal
subgroup $N$ of order $p$. (This follows from the Sylow Theorems;
see, for example, \cite[p. 34]{L}.) Then there is a Hopf algebra map
from $\Bbbk L$ to $\Bbbk (L/N)$ where $L/N$ is a group of order $4$.
Dualizing we see that $\Bbbk^L$ contains a sub-Hopf algebra
isomorphic to a group algebra of dimension $4$ and thus $G(\Bbbk^L)$
is a group of order $4$.
\end{remark}
\begin{prop}
\label{pr: nontriv gplike} Let $H$ be a nonsemisimple, nonpointed non-copointed Hopf algebra with
$\dim H = 8p$ and suppose $H$ has a
simple subcoalgebra $D$ of dimension $4$ stable under the antipode.
Then $H$ has a nontrivial grouplike element of order $2$.
\end{prop}
\begin{proof}
Let $\mathcal{H}$ denote the sub-Hopf algebra of
$H$ generated by $D$. Then
$\dim \mathcal{H} \neq 2,4, p$ and so $\dim \mathcal{H} = 8, 2p, 4p $ or $8p$.
\par
If $\dim \mathcal{H} = 8$, then by the classification of Hopf
algebras of dimension $8$,
\cite{W}, \cite{stefan}, $\mathcal{H} \cong \Bbbk [C_2 \times
C_2] \oplus \mathcal{M}^\ast (2,\Bbbk)$ as coalgebras if
$\mathcal{H}$ is semisimple and $\mathcal{H} \cong H_4 \oplus
\mathcal{M}^\ast(2, \Bbbk)$ if $\mathcal{H}$ is copointed. In either
case, $\mathcal{H}$,
and thus $H$,
contains a grouplike element of order $2$.
If $\dim \mathcal{H} = 2p$, then by \cite{Ng3}, $\mathcal{H}$ is
semisimple, so that $\mathcal{H}= \Bbbk^{\mathbb{D}_{p}}$ and
has a
grouplike of order $2$.
Now suppose that $\dim \mathcal{H} = 4p$. By Proposition
\ref{prop:natale-stefan},
$\mathcal{H} $ fits into a central exact sequence:
\begin{equation*}
\Bbbk^G \overset{i}{\hookrightarrow} \mathcal{H}
\overset{\pi}{\twoheadrightarrow} A
\end{equation*}
for a group $G$ and $A$ a nonsemisimple copointed Hopf algebra. Then
$|G| \in \{1,2,4,p,2p,4p \}$. If $|G| = 1$, then $\mathcal{H}$ is
nonpointed nonsemisimple but has pointed dual, so by Subsection
\ref{sect: 4p}, $\mathcal{H} \cong \mathcal{A}(-1,1)^\ast \cong H_4
\oplus \mathcal{M}^\ast(2, \Bbbk)^{p-1}$ as coalgebras and
consequently has a grouplike element of order $2$. If $|G| \in
\{2,4,2p\}$, then $\Bbbk^G$ has also a grouplike element of order
$2$. If $|G|=p$, then $p$ divides $|G(\mathcal{H})|$ and $|G(H)|$
so that by Proposition \ref{pr: dim not p,8p,4p}, $G(H) \cong C_{
2p}$ and $H$ has a grouplike of order $2$. If $|G| = 4p$, then
$\mathcal{H} = \Bbbk^G$ for $G$ a nonabelian group of order $4p$. By
Remark \ref{rm: DF},
$\Bbbk^G$ has a group of grouplikes of
order $4$ unless $p=3$, $G=\mathbb{A}_4$ and the dimension of
$\mathcal{H}$ is $12$. But if $\mathcal{H} = \Bbbk^{\mathbb{A}_4}$
does
not have a grouplike of order $2$, then
as a coalgebra $\Bbbk^{\mathbb{A}_4}
\cong \Bbbk C_3 \oplus \mathcal{M}^\ast (3,\Bbbk)$.
But $\mathcal{H}$ has a simple subcoalgebra
of dimension $4$, so this case is impossible.
Finally, assume that $D$ generates $H$ so that as above,
we have an exact sequence
$\Bbbk^G \overset{i}{\hookrightarrow} H
\overset{\pi}{\twoheadrightarrow} A$
for a group $G$ and $A$ a nonsemisimple copointed Hopf algebra.
Since $H$ is assumed to be non-copointed, then $|G| \neq 1$, and
since $H$ is nonsemisimple, $|G| \neq 8p$. The argument above shows
that if $|G| \in \{ 2,4,p,2p \}$, then $H$ has a grouplike
element of order $2$. If $|G|$ is $8$ or $4p$, then $A$ has
dimension $p$ or $2$ respectively and so must be semisimple. This
would imply that $H$ is semisimple, a contradiction.
\end{proof}
\begin{lema}\label{lem:24-gr-2}
If $\dim H = 24$ then $H$ has a grouplike element of order $2$.
\end{lema}
\begin{proof}
By Proposition \ref{pr: dim not p,8p,4p}, $G(H)\ncong C_p = C_3$
so it suffices to show that $H$ has a nontrivial grouplike
element, i.e., that $H_0$ is not of the form $\Bbbk\cdot 1 \oplus E$
where $E$ is a sum of simple subcoalgebras of dimension greater than
$1$. Suppose that
\begin{equation*}
H_0 = \Bbbk\cdot 1 \oplus \bigoplus_{i=1}^t D_i \mbox{ where } D_i
\cong \mathcal{M}^\ast (n_i,\Bbbk) \mbox{ and } n_j \leq n_{j+1}.
\end{equation*}
By Proposition
\ref{prop:biti-dasca}, $\dim H_0 \leq 15$ so that the possibilities for $H_0$ are
$H_0 = \Bbbk \cdot 1 \oplus {\mathcal M}^{\ast}(2,\Bbbk)^s $ with $s=1,2,3$,
$H_0 = \Bbbk \cdot 1 \oplus {\mathcal M}^{\ast}(3,\Bbbk) $ or
$H_0 = \Bbbk \cdot 1 \oplus {\mathcal M}^{\ast}(2,\Bbbk) \oplus {\mathcal M}^{\ast}(3,\Bbbk) $. If $H$ has a simple
subcoalgebra of dimension $4$ stable under the antipode then by Proposition \ref{pr:
nontriv gplike}, $H$ has
a grouplike element of order $2$.
If $H_0 = \Bbbk \cdot 1 \oplus {\mathcal M}^{\ast}(3,\Bbbk) $ then Proposition
\ref{prop:biti-dasca} implies that $\dim H \geq 26$, a contradiction. Thus only the cases
$H_0 = \Bbbk \cdot 1 \oplus {\mathcal M}^{\ast}(2,\Bbbk)^s $ with $s=2,3$ and $S(D_i) \neq D_i$ remain.
\smallbreak
\par Suppose that $H_0 = \Bbbk \cdot 1 \oplus \sum_{i}D_i $ with $D_i \cong {\mathcal M}^{\ast}(2,\Bbbk)$
and $S(D_i) = D_j$ for some $j \neq i$. Note that $\dim H_0 >8$.
Let ${\mathcal D} $ denote the set of $D_i$. Then since $4$ divides $\dim D_i$, $2\dim P^{ 1, {\mathcal D}}$, and $\dim P^{{\mathcal D},{\mathcal D}}$, then
$4$ divides $1 + \dim P^{1,1}$ and $\dim P^{1,1} \geq 3$. Thus by Lemma \ref{lema:fukuda}, $P^{1,1}_\ell$
is nondegenerate for some $\ell >2$. Then $P_{m}^{1,D_i}, P_1^{D_i,1}, P_1^{1, S(D_i)}$ are nondegenerate for
$m = \ell -1\geq 2$, some $i$. Then $2\dim P^{1,{\mathcal D}} \geq 8$. Since $P_1^{D_i,1}$ and $P_m^{S(D_i),1}$
are nondegenerate then $P^{D_i, {\mathcal D}}$ and $P^{S(D_i), {\mathcal D}}$ are nondegenerate and $\dim P^{{\mathcal D},{\mathcal D}} \geq 8$.
But this is impossible if $\dim H = 24$.
\end{proof}
\begin{remark} Similar arguments to the proof of Lemma \ref{lem:24-gr-2}
apply if $\dim H=4n$
and $H_0 = \Bbbk \cdot 1 \oplus \sum_{i=1}^t D_i$ with $ D_i = {\mathcal M}^\ast(2,\Bbbk) $ and
$D_i \neq S(D_i)$ for all $i$. Let ${\mathcal D} $ denote the set of $D_i$.
Then $2 \dim P^{1,{\mathcal D}} + \dim P^{{\mathcal D},{\mathcal D}} \geq 20$ where ${\mathcal D}$ denotes the set of
simple $4$-dimensional subcoalgebras.
\par For, we may suppose that $P^{1,1} = P^{1,1}_\ell$ with $\ell \geq 3$.
Then $P_1^{1,C},P_{\ell -1}^{C,1}, P_1^{S(C),1},P_{\ell -1}^{C,E},
P_{\ell -2}^{C,D}, P_1^{D,1}$ are nondegenerate
for some $C,D,E \in {\mathcal D}$
so that $2\dim P^{1,{\mathcal D}} \geq 8$ and $\dim P^{{\mathcal D},{\mathcal D}} \geq 8$.
Furthermore, since $\ell -1\geq 2$, then $P_1^{C,X}, P_{\ell -2}^{X,E}$
are nondegenerate for some coalgebra $X$.
If $\dim X = 1$,
then $P_1^{C,1}, P_1^{S(C),1}, P_{\ell -1}^{C,1}$ are nondegenerate and
$2 \dim P^{1,{\mathcal D}} \geq 12$. If $\dim X = 4$,
then $P_1^{C,X}, P_{\ell -1}^{C,E}, P_{\ell -2}^{C,D} $ are nondegenerate and the statement follows.
\end{remark}
We finish the section with the proof of Theorem \ref{thm:24}.
\bigbreak
\noindent {\bf Proof of Theorem B.}
Let $ \dim H =24$ and suppose that $H$ does not have the Chevalley property.
Then $ |G(H)|\neq 1, 3, 8, 12$ or $ 24 $, by Lemma
\ref{lem:24-gr-2},
Remark \ref{rm: 3 5 7}
and Proposition \ref{pr: dim not p,8p,4p}. Since $ |G(H)| $
divides $ \dim H $, we have that $|G(H)|\in \{ 2, 4,6 \}$ and by Remark \ref{rm: summary},
the proof is complete.
\qed
\newpage
\section{Open cases}
The following table enumerates all open cases in the
classification of Hopf algebras of dimension less than $100$ up to isomorphism.
In this table, $p$ is arbitrary, not necessarily odd.
\begin{table}[ht]
\begin{center}
\tiny{\begin{tabular}
{|p{1cm}|p{3.1cm}|p{3.9cm}|p{3.4cm}|p{3.4cm}|} \hline
{\bf $\dim H$} & {\bf Semisimple} & {\bf Pointed}&
{\bf Chevalley} & {\bf Other}\\
\hline\hline
$ p$ & {\bf Completed:}
\newline All trivial \cite{Z}
& {\bf None} & {\bf None}
& {\bf None:} \cite{Z}
\\ \hline
$ 2p$\newline
$p$ odd & {\bf Completed:}
\newline All trivial \cite{ma-2p}\footnotemark
& {\bf None}
& {\bf None} & {\bf None:} \cite{Ng3}
\\ \hline
$ p^2$ & {\bf Completed:}
All trivial \cite{masuoka-p^n}
& {\bf Completed:} $\exists$ $p-1$, the Taft Hopf algebras \cite{andrussch}
& {\bf None} & {\bf None:} \cite{Ng}
\\ \hline
$ pq$ & {\bf Completed:}
All trivial \newline \cite{ma-6-8, Ng3, EG, GW, So, pqq2}
& {\bf None}
& {\bf None} & {\bf None:} for $p<q \leq 4p+11$ \cite{Ng4}
\newline {\bf Open:}
$87$, $93$.
\\
\hline
$p^3$ & {\bf Completed:} \newline $p=2 $ , $\exists$ $ 1$ \cite{k-p} \cite{ma-6-8}
\newline $ p$ odd , $\exists$ $p+1$ \cite{ma-pp}
& {\bf Completed:} $p=2$, $\exists$ $5$ \cite{stefan}
\newline $p$ odd $\exists$ $(p-1)(p+9)/2$ \cite{AS2, CD, SV}
& {\bf None} &
{\bf None:}\newline $8$ \cite{W}, \cite{stefan}
\newline $27$ \cite{GG}, \cite{bg}
\\
\hline
$2p^2$
\newline $p$ odd& {\bf Completed:} $\exists$ $2$, they are duals
\cite{masuoka-further}, \cite{pqq}
& {\bf Completed:}\newline
$\exists$ $4(p-1)$ \cite[A.1]{andrunatale}&
{\bf None} &
{\bf None:}\footnotemark
\cite{hilgemann-ng} \\
\hline
$pq^2$
\newline $p$ odd& {\bf Completed:}\footnotemark
\newline \cite{G, masuoka-further, pqq, pqq2, clspqq, eno-08}& {\bf Completed:}\newline
$\exists$ $4(q-1)$ \cite[A.1]{andrunatale}
& {\bf None:} \cite[Lemma A.2]{andrunatale} & {\bf Completed:}
$12$ \cite{natale}
\newline 20, 28, 44 \cite{ChNg}
\newline
{\bf Open:}
$ 45$, $ 52$, $ 63$,
$ 68$, $ 75$, $ 76$,
$ 92$, $ 99$. \\
\hline
$ pqr$ & {\bf Completed:}\footnotemark \newline
\cite{pqq, pqq2, eno-08} & {\bf None} &
{\bf None: } Prop. \ref{prop:no-chev-rpq} &
{\bf Completed:} $30$ \cite{fukuda-30} \newline
{\bf Open:}
$42$,
$66$,
$70$, $78$\\
\hline
$ p^4$ & {\bf Completed:} $p=2$, $\exists$ $16$ \cite[Theorem 1.2]{kashina} \newline {\bf Open:} 81
& {\bf Completed:} $16$; $\exists$ $29$
\cite{pointed16} \footnotemark
\newline {\bf Completed:} $p$ odd \cite{AS2}.
Infinite nonisomorphic families exist \cite{AS2}, \cite{bdg}, \cite{Gel}\footnotemark[10] &
{\bf Completed:} 16 \cite{de1tipo6chevalley}\newline $ \exists $ 2
selfdual, coradical $A_8$
\newline{\bf Open:} 81 & {\bf Completed:} 16
\cite{GV}
\newline
{\bf Open:}
$81$
\\
\hline
$ p^3q$ & {\bf Open}
& {\bf Completed:}\footnotemark \newline $24$, $40$, $54$, $56$ \cite{G1}
\newline {\bf Open:} $88$ \footnotemark[9]
& {\bf Open}
& {\bf Open:}\newline
$24 $,
$40 $,
$54 $,
$56 $,
$88 $.
\\
\hline$ p^2q^2$ & {\bf Open}
& {\bf Completed:} 36
\cite{G1} \newline
{\bf Open:} $100$ \footnotemark[9]
& {\bf Open} & {\bf Open}:
$36$,
$100$
\\
\hline $ p^2qr$ & {\bf Open} & {\bf Completed:} $60$ \cite{G1}
\newline {\bf Open:}
$84$, $90$ \footnotemark[9]
& {\bf Open} & {\bf Open:}
$60$, $84$, $90$
\\
\hline
$ p^3q^{2}$ & {\bf Open}
& {\bf Open}\footnotemark &
{\bf Open }\footnotemark
& {\bf Open:} $72$
\\
\hline
$ p^n$ \newline $n=5,6$
& {\bf Open} & {\bf Completed:}
$32$. \cite{G3} Infinite families of nonisomorphic
Hopf algebras exist. \cite{G3}, \cite{b iso}\footnotemark[10]
\newline {\bf Open:} 64 &
{\bf Open} & {\bf Open:} $32$, $64$ \\
\hline
$p^{4}q$ & {\bf Open} & {\bf Completed:} 48
\cite{G1}
\newline {\bf Open:} $80$ &
{\bf Open} & {\bf Open:}
$48$, $80$
\\
\hline
$ p^5q$ & {\bf Open}
& {\bf Open}\footnotemark[6]
& {\bf Open} & {\bf Open:}
$96$\\ \hline
\end{tabular}}
\end{center}
\caption{ Hopf algebras of dimension $\leq
100$}\label{tab-abiertas}
\end{table}
\footnotetext[1] {Dimension $6$ was classified in \cite{ma-6-8}.}
\footnotetext[2]{The classification for dimension $18= 2(3^2)$
was completed in \cite{d-fukuda}.}
\footnotetext[3]{The complete classification of semisimple Hopf algebras of
dimension $12= 3(2^2)$ is given in \cite{fukuda}.}
\footnotetext[4]{The complete classification of semisimple Hopf algebras of
dimension $30$ and $42$ is given in \cite{Na-mm}.}
\footnotetext[5]{The duals to these are explicitly constructed in \cite{biti2}.}
\footnotetext[6]{ Pointed
Hopf algebras $H$ with $\frac{\dim H}{|G(H)|}< 32$ or
$\frac{\dim H}{|G(H)|} =p^{3}$ were classified in \cite{G1}. }
\footnotetext[7]{Pointed Hopf algebras with nonabelian
grouplikes known to exist by \cite{AHS} dimension $p^3q^2$,
\cite{FG} dimension $p^5q$.}
\footnotetext[8]{Nonpointed Hopf algebras with
Chevalley property known to exist \cite{AV1, AV2}.}
\footnotetext[9]{ $\dim p^3q, p^2q^2, p^2qr$: For dimensions $88$, $100$, $84$, $90$, the classification of the pointed Hopf algebras was completed
for those with coradical a group algebra of order a power of 2 in \cite{nichols} and \cite{G1}.}
\footnotetext[10]{The families of nonisomorphic pointed Hopf algebras of dimension $81$ consist of quasi-isomorphic Hopf algebras \cite{masdefending} but the duals of the families of nonisomorphic pointed Hopf algebras of dimension $32$ give an infinite family of non-quasi-isomorphic
Hopf algebras \cite{eg}.}
The columns from left to right describe
the classification of Hopf algebras which are semisimple, pointed nonsemisimple,
nonsemisimple nonpointed with the Chevalley property, etc.
We call a Hopf algebra \emph{trivial} if it is a group
algebra or the dual of a group algebra. For dimension $mn^2$, pointed Hopf algebras
always exist; just take $\Bbbk C_m \otimes T_q$ where $q$ is a primitive $n$th root of unity.
\par Note that by
\cite[Prop. 1.8]{andrunatale}, a Hopf algebra of square-free dimension cannot be pointed.
Also note that if for every divisor $m$ of some dimension $n$ the only semisimple Hopf algebras of
dimension $m$ are the group algebras, then there are no Hopf algebras of dimension $n$ with the Chevalley property.
For example, this is why there are no nonpointed Hopf algebras of dimension $p^3$ with the Chevalley property.
\par Examples of nonpointed but copointed Hopf algebras do exist. They are given
by duals of nontrivial liftings which are not Radford bosonizations. See for
example \cite{biti}.
In general, this table does not contain references to partial results for a particular dimension even
though the literature may contain some. For example the general classification for dimension $24$ is listed only as Open.
Also when a general result has been proven, the table cites only that result. For example, \cite{hilgemann-ng} is cited
for the result that all Hopf algebras of dimension $2p^2$, $p$ odd, are semisimple or pointed; the specific case of dimension
$18$ was proved in \cite{d-fukuda}. We have attempted to include references to some specific cases in the footnotes but make no claim
that these are complete.
|
{
"timestamp": "2012-06-29T02:00:54",
"yymm": "1206",
"arxiv_id": "1206.6529",
"language": "en",
"url": "https://arxiv.org/abs/1206.6529"
}
|
\section{Introduction}\label{introduction}\label{sec1}
The study of the states of quark matter under extreme conditions has
attracted much attention over the past few years. Extreme conditions
include high temperatures and finite baryonic chemical potentials as
well as strong magnetic fields. The latter is responsible for many
interesting effects on the properties of quark matter. Some of the
most important ones are magnetic catalysis of dynamical chiral
symmetry breaking \cite{klimenko1992, miransky1995, catalysis}, that
leads to a modification of the nature of electroweak
\cite{ayala2008}, chiral and color-superconducting phase transitions
\cite{superconducting, fayazbakhsh2010-1, fayazbakhsh2010,
skokov2011, pawlowski2012}, production of chiral density waves
\cite{klimenko2010}, chiral magnetic effect \cite{kharzeev2008}, and
last but not least inducing electromagnetic superconductivity and
superfluidity \cite{chernodub2011}. In this paper, we will focus on
the effect of constant magnetic fields on the properties of
\textit{neutral and noninteracting} mesons in a hot and dense quark
matter. In particular, the temperature dependence of meson masses as
well as their direction-dependent refraction indices\footnote{The
term ``refraction index'' is used in \cite{shuryak1990} for pions
modified by the matter (quasipions). Although the same terminology
is also used in \cite{ayala2002}, the definitions of refraction
index in \cite{shuryak1990} and \cite{ayala2002} are slightly
different, as will be explained later.} and screening masses will be
explored in the presence of various fixed magnetic fields. The
largest observed magnetic field in nature is about $10^{12}-10^{13}$
Gau\ss~in pulsars and up to $10^{14}-10^{15}$ Gau\ss~on the surface
of some magnetars, where the inner field is estimated to be of order
$10^{18}-10^{20}$ Gau\ss~\cite{incera2010}. There is also evidence
for the creation of very strong and short-living magnetic fields in
the early stages of non-central heavy ion collisions at RHIC
\cite{kharzeev51-STAR, mclerran2007}. Depending on the collision
energies and impact parameters, the magnetic fields produced at RHIC
and LHC are estimated to be of the order $eB\sim 1.5~m_{\pi}^{2}$,
corresponding to $0.03$ GeV$^{2}$ for $m_{\pi}=138$ MeV, and $eB\sim
15~m_{\pi}^{2}$, corresponding to $0.3$ GeV$^{2}$, respectively
\cite{skokov2010}.\footnote{Note that $eB=1$ GeV$^{2}$ corresponds
to $B\sim 1.7\times 10^{20}$ Gau\ss.} On the other hand, it is known
that the quark-gluon plasma, produced in high-energy heavy-ion
collisions, passes over many stages during its evolution, the last
of which consists of a large amount of hadrons, including pions,
until a final freeze-out \cite{ayala2002}. Thus, the presence of a
background magnetic field created in heavy ion experiments may
affect the properties of ``charged quarks'' in the earliest stage of
the collision and although the created strong magnetic field is
extremely short living and decays very fast \cite{mclerran2007,
skokov2010}, it may affect the properties of the hadrons made of
these ``magnetized'' quarks. Even the properties of \textit{neutral
mesons} may be affected by the external magnetic field produced in
the earliest phase of heavy-ion collisions. In the present paper, we
do not intend to go through the phenomenology of heavy-ion
collisions. Our computation is only a theoretical attempt to study
the effect of external magnetic fields on ``magnetized''
\textit{neutral mesons}, that, because of the lack of electric
charge, do not interact directly with the external magnetic field.
Our study is indeed in contrast with the recent studies in
\cite{andersen2011-pions, anderson2012-2}, where chiral perturbation
theory is used to study the effect of external magnetic fields on
the pole and screening masses as well as the decay rates of
\textit{charged pions} interacting directly with the external
magnetic field.
\par
There are several attempts to study the effect of temperature and
chemical potential on the properties of pions in a hot and dense
medium, in the absence of external magnetic fields
\cite{shuryak1990, pisarski1996, ayala2002, chiral-perturbation,
son2000}. In \cite{shuryak1990}, the energy dispersion relation of
the so-called ``quasipions'' (or pions modified by the matter) is
introduced by
\begin{eqnarray}\label{int1a}
\omega^{2}(p)=u^{2}\mathbf{p}^{2}+m_{\pi}^{2}.
\end{eqnarray}
Here, $u(T)$ is the temperature-dependent refraction index (also
called ``mean quasipion velocity'' \cite{shuryak1990}), and
$m_{\pi}$ is the pole mass of the pions. To determine $m_{\pi}$, one
can either start from the Lagrangian density of a linear
$\sigma$-model including four-pion interaction or use the chiral
perturbation theory Lagrangian within certain approximation.
Considering the pion (one-loop) self-energy of the model, and
computing, in particular, its pole, it is possible to determine the
pion pole mass (at one-loop level). As concerns the screening mass
of pions, $m^{s}_{\pi}$, it is related to $m_{\pi}$ through the
relation $m^{s}_{\pi}=m_{\pi}/v_{\pi}$, where $v_{\pi}$ is the pion
velocity \cite{pisarski1996}. As it is shown in \cite{pisarski1996},
the velocity $v_{\pi}$ of \textit{massless} pions is in general
given by
\begin{eqnarray}\label{int1}
\omega^{2}=v_{\pi}^{2}p^{2}\equiv \frac{\mbox{Re}
f_{\pi}^{s}}{\mbox{Re}f_{\pi}^{t}}p^{2},
\end{eqnarray}
where $\omega\equiv p_{0}$ is the energy, $p\equiv |\mathbf{p}|$ is
the absolute value of pion three momentum, and $f_{\pi}^{t}$ and
$f_{\pi}^{s}$ are temporal and spatial pion decay constants,
respectively. As it turns out, at zero temperature, because of
relativistic invariance, $f_{\pi}^{t}=f_{\pi}^{s}$, and therefore
$v_{\pi}=1$. At finite temperature, however, since a privileged rest
frame is provided by the medium, relativistic invariance does not
apply anymore, and, as it is shown in \cite{pisarski1996}, ``cool''
pions propagate at a velocity $v_{\pi}<1$. Moreover, it is shown in
\cite{pisarski1996}, that for approximate chiral symmetry, the
Gell-Mann, Oakes and Renner (GOR) relation between the pion mass
$m_{\pi}$ and the pion decay constant $f_{\pi}$ still holds at
finite temperature, except that instead of $f_{\pi}$, the real part
of $f_{\pi}^{t}$ enters the GOR relation, i.e.
$m_{\pi}^{2}=\frac{2m_{0}\langle
\bar{\psi}\psi\rangle}{(\mbox{Re}f_{\pi}^{t})^{2}}$. Let us also
notice that at finite temperature and in the absence of external
magnetic fields, no distinction is to be made between neutral and
charged pion masses.
\par
Nontrivial energy dispersion relation of mesons is also introduced
in \cite{ayala2002} and \cite{son2000}. In \cite{ayala2002}, using
the definition of the group velocity, a momentum dependent
``refraction index'' $\tilde{n}(p)$ is defined for pions by the
ratio of the group velocity in matter and in vacuum,
$\tilde{n}(p)\equiv v_{gr}^{\mbox{\tiny{vac}}}/v_{gr}$. Here, the
matter pion group velocity is defined by $v_{gr}\equiv
\frac{dp_{0}}{dp}$ with
$p_{0}=[n^{-1}(T,\mu)p^{2}+M^{2}(T,\mu)]^{1/2}$, and the vacuum pion
group velocity is defined by $v_{gr}^{\mbox{\tiny{vac}}}\equiv
\frac{p}{p_{0}^{\mbox{\tiny{vac}}}}$. The momentum dependent
refraction index is therefore given by
$n(p)=\left(\frac{p_{0}}{p_{0}^{\mbox{\tiny{vac}}}}\right)n$. It is
argued that since for finite temperature $T$ and chemical potential
$\mu$, we always have both $n>1$ and
$\frac{p_{0}}{p_{0}^{\mbox{\tiny{vac}}}}>1$ for all values of $p$,
the index of refraction developed by the pion medium at finite $T$
and $\mu$ is always larger than unity \cite{ayala2002}. Let us
notice that the definition of the refraction index $n$ in
\cite{ayala2002} is slightly different from what is used in
\cite{shuryak1990}: In \cite{ayala2002}, $n^{-1}$ appearing in the
dispersion relation $p_{0}=[n^{-1}(T,\mu)p^{2}+M^{2}(T,\mu)]^{1/2}$
is the same as $u^{2}$ appearing in the dispersion relation
(\ref{int1a}) from \cite{shuryak1990}. In the latter, $u=n^{-1/2}$
is called refraction index.\footnote{In the present paper, we have
adopted the terminology used in \cite{shuryak1990}.} Having this in
mind, it turns out that the results presented in \cite{ayala2002}
coincide with those obtained in \cite{son2000}. Here, the quantity
$u$ appears as in \cite{shuryak1990}, in the pion energy dispersion
relation, $\omega^{2}=u^{2}(\mathbf{p}^{2}+m^{2})$, and is termed
``velocity'', although the authors mention that $u$ is the pion
velocity only when $m=0$. Here, $m$ is the screening mass. The pion
pole mass is then defined by $m_{p}=um$. Using scaling and
universality arguments, the authors predict that ``when critical
temperature is approached from below, the pole mass of the pion
drops despite the growth of the pion screening mass. This fact is
attributed to the decrease of the pion velocity near the phase
transition'' \cite{son2000}.
\par
As concerns the effect of external magnetic fields on the low energy
properties of QCD, in \cite{agasian2001}, the GOR relation between
the neutral pion mass $m_{\pi^{0}}$ and its decay constant
$f_{\pi^{0}}$, is shown to be valid at first order in chiral
perturbation theory in the presence of constant and weak magnetic
fields, whose Lagrangian includes, in particular,
$(\vec{\pi}^{2})^{2}$ self-interaction terms. This method is also
used recently in \cite{andersen2011-pions, anderson2012-2} to
determine the pion thermal mass and the pion decay constants in the
presence of a constant magnetic field and at finite temperature. It
is shown, that the magnetic field gives rise to a splitting between
$m_{\pi^{0}}$ and $m_{\pi^{\pm}}$ as well as $f_{\pi^{0}}$ and
$f_{\pi^{\pm}}$. The pion decay constants $f_{\pi^{0}}$ and
$f_{\pi^{\pm}}$ are computed by evaluating the matrix elements
$\langle0|A_{\mu}^{0}|\pi^{0}\rangle$ and
$\langle0|A_{\mu}^{\pm}|\pi^{\mp}\rangle$, respectively. However, no
distinction between the temporal ($\mu=0$) and spatial ($\mu=1,2,3$)
directions is made.
\par
In the present paper, we will mainly focus on nontrivial energy
dispersion relations of \textit{noninteracting} $\sigma$ and
$\vec{\pi}$ mesons, arising from an appropriate evaluation of the
one-loop effective action of a two-flavor NJL model in a derivative
expansion up to second order. Our method is therefore different from
the method used in \cite{andersen2011-pions, anderson2012-2}, and
involves, in contrast to \cite{andersen2011-pions, anderson2012-2},
the effect of external magnetic fields on \textit{charged quarks}
from which the mesons are built. This will give us the possibility
to explore the effect of external magnetic fields on \textit{neutral
mesons} at finite temperature and chemical potential. Using the
method originally introduced in \cite{miranskybook, miransky1995}
for a single flavor NJL model, we will arrive at the effective
action of $\sigma$ and $\vec{\pi}=(\pi_{1},\pi_{2},\pi_{3})$ mesons,
\begin{eqnarray}\label{int2}
\lefteqn{\hspace{-0.6cm}\Gamma_{\mbox{\tiny{eff}}}[\sigma,\vec{\pi}]=\Gamma_{\mbox{\tiny{eff}}}[\sigma_{0}]}\nonumber\\
&&\hspace{-0.8cm}-\frac{1}{2}\int
d^{d}x~{\sigma}(x)\left(M_{\sigma}^{2}+{\cal{G}}^{\mu\mu}\partial_{\mu}^{2}\right){\sigma}(x)\nonumber\\
&&\hspace{-0.8cm}-\frac{1}{2}\sum\limits_{\ell=1}^{3}\int
d^{d}x~{\pi}_{\ell}(x)\left(M_{\vec{\pi}}^{2}+
{\cal{F}}^{\mu\mu}\partial_{\mu}^{2}\right)_{\ell\ell}{\pi}_{\ell}(x),
\end{eqnarray}
including nontrivial meson squared mass matrices $(M_{\sigma}^{2},
M_{\vec{\pi}}^{2})$ and form factors $({\cal{G}}^{\mu\nu},
{\cal{F}}^{\mu\nu})$, and leading to the energy dispersion relations
of $\sigma$ and $\vec{\pi}$ mesons
\begin{eqnarray}\label{int3}
E_{\sigma}^{2}&=&\sum_{i}(u_{\sigma}^{(i)}p_{i})^{2}+m_{\sigma}^{2},
\nonumber\\
E_{\vec{\pi}}^{2}&=&\sum_{i}(u_{\vec{\pi}}^{(i)}p_{i})^{2}+m_{\vec{\pi}}^{2}.
\end{eqnarray}
Here, the pole masses $(m_{\sigma}^{2},m_{\vec{\pi}}^{2})$ and
refraction indices $(\mathbf{u}_{\sigma}, \mathbf{u}_{\vec{\pi}})$
of the mesons are defined by
\begin{eqnarray*}
m_{\sigma}^{2}=\frac{\mbox{Re}[
M_{\sigma}^{2}]}{\mbox{Re}[{\cal{G}}^{00}]},\qquad
m_{\pi_{\ell}}^{2}=\frac{\mbox{Re}[M_{\pi_{\ell}}^{2}]}{\mbox{Re}[({\cal{F}}^{00})_{\ell\ell}]},
\end{eqnarray*}
and
\begin{eqnarray*}
u_{\sigma}^{(i)}=\left(\frac{\mbox{Re}[{\cal{G}}^{ii}]}{\mbox{Re}[{\cal{G}}^{00}]}\right)^{1/2},\qquad
u_{\pi_{\ell}}^{(i)}=\left(\frac{\mbox{Re}[({\cal{F}}^{ii})_{\ell\ell}]}{\mbox{Re}[({\cal{F}}^{00})_{\ell\ell}]}\right)^{1/2},
\end{eqnarray*}
for all space directions $i=1,2,3$ and isospin indices $\ell=1,2,3$.
These quantities can be computed using the one-loop effective action
of a two-flavor NJL model at finite temperature $T$, chemical
potential $\mu$ and for a constant magnetic field $B$, according to
the formalism presented in \cite{miranskybook, miransky1995}. Using
the definition of the screening mass from \cite{son2000}, the
screening masses of $\sigma$ and $\vec{\pi}$ mesons,
$m_{\sigma}^{(i)}$ and $m_{\vec{\pi}}^{(i)}$ are given by
\begin{eqnarray*}
m_{\sigma}^{(i)}=\frac{m_{\sigma}}{u_{\sigma}^{(i)}},\qquad\mbox{and}\qquad
m_{\vec{\pi}}^{(i)}=\frac{m_{\vec{\pi}}}{u_{\vec{\pi}}^{(i)}},~~~\forall
i=1,2,3,
\end{eqnarray*}
respectively. Later, we will, in particular, show that in the
presence of a uniform magnetic field, directed in a specific
direction, the refraction indices and screening masses in the
transverse and longitudinal directions with respect to the direction
of the background magnetic field will be different.
\par
The organization of this paper is as follows. In Sec. \ref{sec2}, we
will generalize the method introduced in \cite{miransky1995} to a
multi-flavor system, and will derive the effective action
(\ref{int2}), using an appropriate derivative expansion up to second
order. In Sec. \ref{sec3}, we will determine the one-loop effective
potential of a two-flavor NJL model including $(\sigma,\vec{\pi})$
mesons. In Sec. \ref{sec4}, the squared mass matrices
$(M_{\sigma}^{2}, M_{\pi^{0}}^{2})$ and kinetic coefficients
$({\cal{G}}^{\mu\nu}, {\cal{F}}^{\mu\nu})$ corresponding to neutral
mesons $\sigma$ and $\pi^{0}$ will be analytically computed at
finite $(T,\mu,eB)$ and up to an integration over $p_{3}$-momentum
as well as a summation over Landau levels. In Sec. \ref{sec5p1}, we
will first use the one-loop effective potential, evaluated in Sec.
\ref{sec3}, to explore the phase portrait of the model. Here, the
effect of magnetic catalysis \cite{klimenko1992, miransky1995} and
inverse magnetic catalysis \cite{fayazbakhsh2010, rebhan2011} on the
critical $(T,\mu,eB)$ will be scrutinized. Performing numerically
the remaining $p_{3}$-integration and the summation over Landau
levels from Sec. \ref{sec4}, we will present, in Sec. \ref{sec5p2},
the $T$-dependence of $(M_{\sigma}^{2}, M_{\pi^{0}}^{2})$ and
$({\cal{G}}^{\mu\nu}, {\cal{F}}^{\mu\nu})$ for various fixed
magnetic fields and at $\mu=0$. Using these results, the
$T$-dependence of pole masses of neutral mesons as well as their
directional refraction indices and screening masses will be
determined in Sec. \ref{sec5p3} for various fixed $eB=0.03, 0.2,
0.3$ GeV$^{2}$. We will in particular show that, for non-vanishing
magnetic fields, the refraction index of noninteracting mesons in
the longitudinal direction is equal to unity, while their transverse
refraction index is \textit{larger} than unity. Let us notice that
since the mesons are massive, this does not mean that magnetized
mesons propagate with speed larger than the speed of
light.\footnote{The effect of constant magnetic fields on the
propagation of massless particles is recently discussed in
\cite{alexandre2012}.} The observed anisotropy in the meson
refraction indices is because of the explicit breaking of Lorentz
invariance by uniform magnetic fields. The same anisotropy is also
reflected in the screening masses of neutral mesons in the
longitudinal and transverse directions with respect to the direction
of the background magnetic field. We will plot the $T$-dependence of
mesons screening masses for various fixed $eB$ and $\mu$, and will
show that, in the transverse directions, they are always smaller
than the screening masses in the longitudinal direction. Motivated
by recent experimental activities at RHIC and LHC, we will only
consider the effects of relatively weak and intermediate magnetic
field strength ($eB=0.03, 0.2, 0.3$ GeV$^{2}$). As concerns the
effect of stronger magnetic fields, we will show that they lead to
certain instabilities at low temperature. Our results for $eB=0.5,
0.7$ GeV$^{2}$ are consistent with the main conclusions presented
recently in \cite{gorbar2012}, where a single flavor NJL model is
studied in $2+1$ dimensions in the presence of a strong magnetic
field and at finite temperature. A summary of our results will be
presented in Section \ref{sec6}.
\section{Mathematical Tool: Derivative Expansion of the Quantum Effective Action}\label{sec2}
\setcounter{equation}{0}\par\noindent Let us consider a theory
containing $N$ real scalar fields $(\varphi_{0},\varphi_{1},\cdots,
\varphi_{N-1})\equiv\Phi$, whose dynamics are described by the
effective action $\Gamma_{\mbox{\tiny{eff}}}[\Phi]$. Using an
appropriate derivative expansion, and, in particular, generalizing
the method introduced in \cite{miranskybook, miransky1995} to a
multi-flavor system, we will derive, in this section, the energy
dispersion relations of $\varphi_{\ell},~ \ell=0,\cdots, N-1$. Using
the energy dispersion relation, the pole and screening mass as well
as the \textit{directional} refraction index corresponding to
$\varphi_{\ell}, \ell=0,\cdots,N-1$ will be defined.
\par
Let us start by expanding $\Phi(x)$ around an $x$-independent
configuration $\Phi_{0}$,
\begin{eqnarray}\label{NN1}
\Phi(x)=\Phi_{0}+\bar{\Phi}(x).
\end{eqnarray}
Plugging (\ref{NN1}) in the effective action, we arrive first at
\begin{eqnarray}\label{NN2}
\lefteqn{\Gamma_{\mbox{\tiny{eff}}}[\Phi]=\Gamma_{\mbox{\tiny{eff}}}[\Phi_{0}]+\int
d^{d}x\frac{\delta\Gamma_{\mbox{\tiny{eff}}}}{\delta\varphi_{i}(x)}\bigg|_{\Phi_{0}}\bar{\varphi}_{i}(x)
}\nonumber\\
&&+\frac{1}{2}\int d^{d}x
d^{d}y\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}}{\delta\varphi_{i}(x)\delta\varphi_{j}(y)}
\bigg|_{\Phi_{0}}\bar{\varphi}_{i}(x)\bar{\varphi}_{j}(y)+\cdots.\nonumber\\
\end{eqnarray}
Assuming that $\Phi_{0}$ describes a configuration that minimizes
the effective action, the second term in (\ref{NN2}) vanishes. Using
then the Taylor expansion
\begin{eqnarray}\label{NN3}
\bar{\Phi}(y)=\bar{\Phi}(x)+z^{\mu}\partial_{\mu}\bar{\Phi}(x)+\frac{1}{2}z^{\mu}z^{\nu}
\partial_{\mu}\partial_{\nu}\bar{\Phi}(x)+\cdots,\nonumber\\
\end{eqnarray}
with $z\equiv y-x$, and neglecting the terms linear in $z$, we get
\begin{eqnarray}\label{NN4}
\lefteqn{\hspace{-0.3cm}\Gamma_{\mbox{\tiny{eff}}}[\Phi]=\Gamma_{\mbox{\tiny{eff}}}[\Phi_{0}]-\frac{1}{2}\int
d^{d}x\
{\cal{M}}^{2}_{ij}[\Phi_{0}]\bar{\varphi}_{i}(x)\bar{\varphi}_{j}(x)}\nonumber\\
&&\hspace{-0.3cm}+\frac{1}{2}\int d^{d}x\
\chi_{ij}^{\mu\nu}[\Phi_{0}]\partial_{\mu}\bar{\varphi}_{i}(x)\partial_{\nu}\bar{\varphi}_{j}(x)+\cdots,
\end{eqnarray}
where the summation over $i,j=0,\cdots, N-1$ is implied. In
(\ref{NN4}), the ``squared mass matrix'' ${\cal{M}}_{ij}^{2}$ and
the ``kinetic matrix'' $\chi_{ij}^{\mu\nu}$ are given by
\begin{eqnarray}
\hspace{-0.3cm}{\cal{M}}_{ij}^{2}[\Phi_{0}]&\equiv&-\int
d^{d}z\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}}{\delta\varphi_{i}(0)\delta\varphi_{j}(z)}\bigg|_{\Phi_{0}},\label{NN5}\\
\hspace{-0.3cm}\chi_{ij}^{\mu\nu}[\Phi_{0}]&\equiv& -\frac{1}{2}\int
d^{d}z
z^{\mu}z^{\nu}\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}}{\delta\varphi_{i}(0)\delta\varphi_{j}(z)}\bigg|_{\Phi_{0}}.\label{NN6}
\end{eqnarray}
The above derivative expansion of $\Gamma_{\mbox{\tiny{eff}}}[\Phi]$
from (\ref{NN4}) can alternatively be given as
\begin{eqnarray}\label{NN7}
\lefteqn{\hspace{-0.5cm}\Gamma_{\mbox{\tiny{eff}}}[\Phi]=\int
d^{d}x\left(-V[\Phi]\right.}\nonumber\\
&&\left.+\frac{1}{2}\chi_{ij}^{\mu\nu}[\Phi]
\partial_{\mu}\varphi_{i}(x)\partial_{\nu}\varphi_{j}(x)+\cdots\right),
\end{eqnarray}
where all non-derivative terms in (\ref{NN4}) are summed up into
the potential part of the effective action $V[\Phi]$, and the terms
with two derivatives yield the kinetic part of the effective action,
proportional to $\chi_{ij}^{\mu\nu}$. To have a connection to the
example that will be worked out in the subsequent sections, let us
assume a fixed configuration for
$\Phi_{0}=\left(\varphi_{0(0)},0,0,\cdots,0\right)$, with
$\varphi_{0(0)}$= const., that spontaneously breaks the $O(N)$
symmetry of the original action. Using (\ref{NN5}), or equivalently
\begin{eqnarray}\label{NN8}
\hspace{-0cm}{\cal{M}}_{00}^{2}[\Phi_{0}]&=&-\int
d^{d}z\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}}{\delta\varphi_{0}(z)\delta\varphi_{0}(0)}\bigg|_{\Phi_{0}},\nonumber\\
\hspace{-0cm}{\cal{M}}_{\ell m}^{2}[\Phi_{0}]&=&-\int
d^{d}z\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}}{\delta\varphi_{\ell}(z)\delta\varphi_{m}(0)}\bigg|_{\Phi_{0}},
\end{eqnarray}
$\forall~\ell,m\geq 1$, it is possible to determine the squared mass
matrices corresponding to the collective modes
$\varphi_{0},\varphi_{1},\cdots,\varphi_{N-1}$. To determine the
kinetic part of the effective action, we use, as in
\cite{miransky1995}, the Ansatz
\begin{eqnarray}\label{NN9}
\tilde{\chi}_{ij}^{\mu\nu}[\Phi]=(F_{1}^{\mu\nu})_{ij}+2F_{2}^{\mu\nu}\frac{\varphi_{i}\varphi_{j}}{\Phi^{2}},
\end{eqnarray}
$\forall~i,j=0,1,\cdots,N-1$. Here,
$\Phi^{2}=\sum_{i=0}^{N-1}\varphi_{i}^{2}$ and
$\tilde{\chi}^{\mu\nu}_{ij}[\Phi_{0}]=\chi_{ij}^{\mu\nu}[\Phi_{0}]$,
appearing in (\ref{NN4}). Plugging (\ref{NN9}) in (\ref{NN7}), the
kinetic part of the effective Lagrangian density including two
derivatives is given by
\begin{eqnarray}\label{NN10}
{\cal{L}}_{k}=\frac{1}{2}(F_{1}^{\mu\nu})_{ij}\partial_{\mu}\varphi_{i}\partial_{\nu}\varphi_{j}+\frac{F_{2}^{\mu\nu}}{\Phi^{2}}\left(\varphi_{i}\partial_{\mu}\varphi_{i}\right)
\left(\varphi_{j}\partial_{\nu}\varphi_{j}\right).\nonumber\\
\end{eqnarray}
To determine the form factors $F_{1}^{\mu\nu}$ and $F_{2}^{\mu\nu}$,
or at least a combination of these two form factors, we will use the
definition of $\Gamma_{\mbox{\tiny{eff}}}^{k}\equiv \int d^{d}x
{\cal{L}}_{k}$, as a part of the effective action including only two
derivatives \cite{miransky1995}. We get
\begin{eqnarray}\label{NN11}
\hspace{-0.3cm}\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}^{k}}{\delta\varphi_{0}(x)\delta\varphi_{0}(0)}\bigg|_{\Phi_{0}}
=-{\cal{G}}^{\mu\nu}\bigg|_{\Phi_{0}}\partial_{\mu}\partial_{\nu}\delta^{d}(x),
\end{eqnarray}
with
${\cal{G}}^{\mu\nu}\equiv\big[(F_{1}^{\mu\nu})_{00}+2F_{2}^{\mu\nu}\big]$,
and
\begin{eqnarray}\label{NN12}
\hspace{-0.3cm}\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}^{k}}{\delta\varphi_{\ell}(x)\delta\varphi_{m}(0)}\bigg|_{\Phi_{0}}=-({\cal{F}}^{\mu\nu})_{\ell
m}\bigg|_{\Phi_{0}}\partial_{\mu}\partial_{\nu}\delta^{d}(x),
\end{eqnarray}
$\forall~\ell,m\geq 1$, where $({\cal{F}}^{\mu\nu})_{\ell
m}\equiv\frac{1}{2}\big[(F_{1}^{\mu\nu})_{\ell
m}+(F_{1}^{\mu\nu})_{m\ell}\big]$. From (\ref{NN11}) and
(\ref{NN12}) we have
\begin{eqnarray}\label{NN13}
{\cal{G}}^{\mu\nu}[\Phi_{0}]&=&-\frac{1}{2}\int d^{d}z
z^{\mu}z^{\nu}\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}^{k}}{\delta\varphi_{0}(z)\delta\varphi_{0}(0)}\bigg|_{\Phi_{0}},\nonumber\\
({\cal{F}}^{\mu\nu})_{\ell m}[\Phi_{0}]&=&-\frac{1}{2}\int d^{d}z
z^{\mu}z^{\nu}\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}^{k}}{\delta\varphi_{\ell}(z)\delta\varphi_{m}(0)}\bigg|_{\Phi_{0}},\nonumber\\
\end{eqnarray}
$\forall~\ell,m\geq 1$. Comparing the above relations with
$\chi_{ij}^{\mu\nu}$ from (\ref{NN6}), it turns out that
$\chi_{00}^{\mu\nu}={\cal{G}}^{\mu\nu}$ and $\chi_{\ell
m}^{\mu\nu}=({\cal{F}}^{\mu\nu})_{\ell m},\forall~\ell,m\geq 1$.
Assuming then $ ({\cal{M}}^{2})_{\ell m}=-({\cal{M}}^{2})_{m\ell}$,
and $({\cal{F}}^{\mu\nu})_{\ell m}=-({\cal{F}}^{\mu\nu})_{m\ell}$,
$\forall~\ell\neq m$ and $\ell,m\geq 1$,\footnote{This will be shown
in our specific example in the subsequent sections.} and denoting
${\cal{M}}^{2}_{00}$ by $M_{0}^{2}$, as well as
${\cal{M}}^{2}_{\ell\ell}$ by $M_{\ell}^{2}$ for
$\ell=1,\cdots,N-1$, the effective action (\ref{NN4}) simplifies as
\begin{eqnarray}\label{NN14}
\lefteqn{\hspace{-1cm}\Gamma_{\mbox{\tiny{eff}}}[\Phi]=\Gamma_{\mbox{\tiny{eff}}}[\Phi_{0}]-\frac{1}{2}\int
d^{d}x\bar{\varphi_{0}}\left(M_{0}^{2}+{\cal{G}}^{\mu\mu}\partial_{\mu}^{2}\right)\bar{\varphi}_{0}
}\nonumber\\
&&\hspace{-.5cm}-\frac{1}{2}\sum\limits_{\ell=1}^{N-1}\int
d^{d}x\bar{\varphi_{\ell}}\big[M_{\ell}^{2}+({\cal{F}}^{\mu\mu})_{\ell\ell}\partial_{\mu}^{2}\big]\bar{\varphi}_{\ell}.
\end{eqnarray}
Here, we have used the fact that ${\cal{G}}^{\mu\nu}$ and
${\cal{F}}^{\mu\nu}$ are diagonal, i.e.
${\cal{G}}^{\mu\nu}={\cal{G}}^{\mu\mu}g^{\mu\nu}$ as well as
${\cal{F}}^{\mu\nu}={\cal{F}}^{\mu\mu}g^{\mu\nu}$. The same
relations are shown to be valid in a single-flavor case
\cite{miransky1995}. From (\ref{NN14}), the general expressions for
the energy dispersion relation of noninteracting $\varphi_{\ell},
\ell=0,\cdots,N-1$ fields can be determined. For $\ell=0$, we have
\begin{eqnarray}\label{NN15}
\hspace{-1cm}E_{\varphi_{0}}^{2}\equiv\frac{1}{{\cal{G}}^{00}}\left({\cal{G}}^{11}p_{1}^{2}+{\cal{G}}^{22}p_{2}^{2}+{\cal{G}}^{33}
p_{3}^{2}+M_{0}^{2}\right),
\end{eqnarray}
and for all $\ell\geq 1$, we have
\begin{eqnarray}\label{NN15b}
\lefteqn{E_{\varphi_{\ell}}^{2}\equiv}\nonumber\\
&&\hspace{-0.5cm}\frac{1}{({\cal{F}}^{00})_{\ell\ell}}\big[({\cal{F}}^{11})_{\ell\ell}~p_{1}^{2}+({\cal{F}}^{22})_{\ell\ell}~p_{2}^{2}+
({\cal{F}}^{33})_{\ell\ell}~p_{3}^{2}+M_{\ell}^{2}\big].\nonumber\\
\end{eqnarray}
Using the above energy dispersion relations, the pole masses of free
$\varphi_{0}$ and $\varphi_{\ell}, \ell\geq 1$ are given by
\begin{eqnarray}\label{NN16}
m_{0}^{2}=\frac{M_{0}^{2}}{{\cal{G}}^{00}}, \qquad\mbox{and}\qquad
m_{\ell}^{2}=\frac{M_{\ell}^{2}}{({\cal{F}}^{00})_{\ell\ell}},
\end{eqnarray}
respectively. The screening masses $m_{\ell}^{(i)}$ and
``directional'' refraction indices $u_{\ell}^{(i)}$ of
noninteracting $\varphi_{\ell}, \ell=0,1,\cdots,N-1$ fields in the
$i$-th directions ($i=1,2,3$) are defined by
\begin{eqnarray}\label{NN17}
\hspace{-0.8cm}m_{0}^{(i)}=\frac{m_{0}}{u_{0}^{(i)}},\qquad\mbox{where}\qquad
(u_{0}^{(i)})^{2}=\frac{{\cal{G}}^{ii}}{{\cal{G}}^{00}},
\end{eqnarray}
for $\ell=0$, as well as
\begin{eqnarray}\label{NN18}
\hspace{-0.4cm}m_{\ell}^{(i)}=\frac{m_{\ell}}{u_{\ell}^{(i)}},\qquad\mbox{where}\qquad
(u_{\ell}^{(i)})^{2}=\frac{({\cal{F}}^{ii})_{\ell\ell}}{({\cal{F}}^{00})_{\ell\ell}},
\end{eqnarray}
for $\ell\geq 1$ [see Sec. \ref{sec5} for more details on the
definition of screening masses and refraction indices].
\par
In the present paper, we will use the above dispersion relations, to
describe the properties of \textit{noninteracting} $\sigma$ and
$\vec{\pi}$ mesons in a hot and magnetized medium. We will focus, in
particular, on $\sigma$ and $\pi_{3}$ mesons. The latter will be
identified with the neutral pion, $\pi_{3}\equiv \pi^{0}$. To do
this, we will first consider, in the next section, a two-flavor NJL
model including appropriate four-fermion interactions. Defining the
meson fields $\sigma$ and $\vec{\pi}$ in terms of fermionic fields,
and eventually integrating the fermions in the presence of a
constant magnetic field, we arrive at the one-loop effective action
$\Gamma_{\mbox{\tiny{eff}}}[\sigma,\vec{\pi}]$, describing the
dynamics of magnetized meson fields. We will spontaneously break the
chiral symmetry of the original theory, by choosing a fixed
configuration
$(\sigma_{0},\vec{\pi}_{0})=(\mbox{const.},{\mathbf{0}})$, that
minimizes $\Gamma_{\mbox{\tiny{eff}}}[\sigma,\vec{\pi}]$. Using then
the formalism described in the present section for the specific case
of $N=4$, and identifying $\varphi_{0}$ with the $\sigma$-meson and
$\varphi_{\ell}, \ell=1,2,3$ with the pions $\pi_{\ell},
\ell=1,2,3$, we will determine the temperature dependence of the
pole and screening mass, as well as the directional refraction
indices of noninteracting neutral $\sigma$ and $\pi^{0}$ mesons at
finite temperature and in the presence of a constant magnetic field.
We will postpone the discussion on the properties of charged and
magnetized pions to a future publication \cite{sadooghi2012-3}.
\section{One-loop effective potential of a two-flavor NJL model at finite $(T,\mu,eB)$}\label{sec3}
\setcounter{equation}{0}\par\noindent In this section, we will
determine the one-loop effective potential corresponding to a
two-flavor magnetized NJL model at finite temperature and density.
The minima of this effective potential will then be used in the
subsequent sections to determine the kinetic coefficients and mass
matrices corresponding to neutral $\sigma$ and $\pi^{0}$ mesons.
\par
Let us start by introducing the Lagrangian density of a two-flavor
gauged NJL model in the presence of a constant magnetic field
\begin{eqnarray}\label{NE1b}
{\cal{L}}&=&\bar{\psi}(x)\left(i\gamma^{\mu}D_{\mu}-m_{0}\right)\psi(x)+G~\{[\bar{\psi}(x)\psi(x)]^2\nonumber\\
&&+
[\bar{\psi}(x)i\gamma_5\vec{\tau}\psi(x)]^2\}-\frac{1}{4}F^{\mu\nu}F_{\mu\nu}.
\end{eqnarray}
Here, the fermionic fields $\psi^{c}_{f}$ carry apart from the Dirac
index, a flavor index $f\in(1,2)=(u,d)$ and a color index
$c\in(1,2,3)=(r,g,b)$. In the chiral limit $m_{0}\to 0$, this
implies the $SU_{L}(2)\times SU_{R}(2)$ chiral and $SU(3)$ color
symmetry of the theory. The isospin symmetry of the theory is
guaranteed by setting $m_{u}=m_{d}\equiv m_{0}$. The covariant
derivative $D_{\mu}$ in (\ref{NE1b}) is defined by $D_{\mu}\equiv
\partial_{\mu}+ieQA_{\mu}^{ext.}$, where
$Q=\mbox{diag}\left(2/3,-1/3\right)$ is the fermionic charge matrix
coupled to the $U(1)$ gauge field $A_{\mu}^{ext.}$, and
$\vec{\tau}=(\tau_{1},\tau_{2},\tau_{3})$ are the Pauli matrices.
Choosing the vector potential $A_{\mu}^{ext.}$ in the Landau gauge
$A_{\mu}^{ext.}=(0,0,Bx_{1},0)$, (\ref{NE1b}) describes a two-flavor
NJL model in the presence of a uniform magnetic field
$\mathbf{B}=B\mathbf{e}_{3}$, aligned in the third direction. The
field strength tensor $F_{\mu\nu}$ is defined as usual by
$F_{\mu\nu}=\partial_{[\mu}A_{\nu]}^{ext.}$, with $A_{\mu}^{ext.}$
fixed as above. As it turns out, the above Lagrangian is equivalent
to the semi-bosonized Lagrangian
\begin{eqnarray}\label{NE2b}
{\cal{L}}_{sb}&=&\bar{\psi}(x)\left(i\gamma^{\mu}D_{\mu}-m_{0}\right)\psi(x)-\bar{\psi}\left(\sigma+i\gamma_5\vec{\tau}\cdot\vec{\pi}\right)\psi\nonumber\\
&&-\frac{(\sigma^2+\vec{\pi}^2)}{4G}-\frac{B^2}{2},
\end{eqnarray}
where the Euler-Lagrange equations of motion for the auxiliary
fields lead to the constraints
\begin{eqnarray}\label{NE3b}
\sigma(x)&=&-2G\bar{\psi}(x)\psi(x),\nonumber\\
\vec{\pi}(x)&=&-2G\bar{\psi}(x)i\gamma_5\vec{\tau}\psi(x).
\end{eqnarray}
To determine the one-loop effective action corresponding to
(\ref{NE1b}) as a functional of $\sigma$ and $\vec{\pi}$, the
fermionic fields $\psi$ and $\bar{\psi}$ in (\ref{NE2b}) are to be
integrated out. Using
\begin{eqnarray}\label{NE4b}
e^{i\Gamma_{\mbox{\tiny{eff}}}[\sigma,\vec{\pi}]}=\int{\cal{D}}\psi{\cal{D}}\bar{\psi}\exp\left(i\int
d^{4}x~{\cal{L}}_{sb}\right),
\end{eqnarray}
the one-loop effective action $\Gamma_{\mbox{\tiny{eff}}}$ is then
given by
\begin{eqnarray}\label{NE5b}
\Gamma_{\mbox{\tiny{eff}}}[\sigma,\vec{\pi}]=\Gamma_{\mbox{\tiny{eff}}}^{(0)}[\sigma,\vec{\pi}]
+\Gamma_{\mbox{\tiny{eff}}}^{(1)}[\sigma,\vec{\pi}],
\end{eqnarray}
where the tree level part, $\Gamma_{\mbox{\tiny{eff}}}^{(0)}$, and
the one-loop part, $\Gamma_{\mbox{\tiny{eff}}}^{(1)}$, are given by
\begin{eqnarray}\label{NE6b}
\Gamma_{\mbox{\tiny{eff}}}^{(0)}[\sigma,\vec{\pi}]=-\int
d^{4}x\left(\frac{\sigma^{2}+\vec{\pi}^{2}}{4G}+\frac{B^{2}}{2}\right),
\end{eqnarray}
and
\begin{eqnarray}\label{NE7b}
\Gamma_{\mbox{\tiny{eff}}}^{(1)}[\sigma,\vec{\pi}]=-i
{\mbox{Tr}}_{\{cfsx\}}\ln[i{S_{Q}^{-1}(\sigma,\vec{\pi})}].
\end{eqnarray}
Here, $m\equiv m_{0}+\sigma(x)$ and
\begin{eqnarray}\label{NE8b}
iS^{-1}_{Q}(\sigma,\vec{\pi})\equiv
i\gamma^{\mu}D_{\mu}-\left(m+i\gamma^{5}\vec{\tau}\cdot\vec{\pi}\right),
\end{eqnarray}
is the inverse fermion propagator. To determine
$\Gamma_{\mbox{\tiny{eff}}}^{(1)}[\sigma,\vec{\pi}]$, let us assume
a constant and fixed configuration
$(\sigma_{0},\vec{\pi}_{0})=(\mbox{const.}, {\mathbf{0}})$ for the
collective modes $(\sigma,\vec{\pi})$, that breaks the
$SU_{L}(2)\times SU_{R}(2)$ chiral symmetry of the original action
in the chiral limit. Only in this case can $m$ be replaced by the
constant constituent quark mass $m=m_{0}+\sigma_{0}$, where
$\sigma_{0}=$const. The one-loop effective potential is given by
evaluating the trace operation in (\ref{NE7b}), that includes a
trace over color $c$, flavor $f$, and spinor $s$ degrees of freedom,
as well as a trace over a four-dimensional space-time coordinate
$x$. Following the standard method introduced e.g. in
\cite{fayazbakhsh2010}, and after a straightforward computation, the
one-loop part of the effective action
$\Gamma_{\mbox{\tiny{eff}}}^{(1)}[\sigma_{0}]$ reads
\begin{eqnarray}\label{NE9b}
\Gamma^{(1)}_{\mbox{\tiny{eff}}}[\sigma_{0}]=-6i\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}
\ln{\det}_{x}[E_{q}^2-p_{0}^2],
\end{eqnarray}
where the energy of a charged fermion in a constant magnetic field
is given by
\begin{eqnarray}\label{NE10b}
E_{q}\equiv\sqrt{\bar{\mathbf{p}}_{q}^{2}+m^{2}}=\sqrt{2|qeB|p+p_{3}^{2}+m^{2}}.
\end{eqnarray}
Here, the Ritus four-momentum
\begin{eqnarray}\label{NE11b}
\bar{p}_{q}=(p_{0},0,-\mbox{sgn}(q eB)\sqrt{2|q eB|p}, p_{3}),
\end{eqnarray}
arises from the solutions of Dirac equation in the presence of a
constant magnetic field (see \cite{ritus, sadooghi2012} for more
details on the Ritus eigenfunction method). In (\ref{NE10b}), $p$
labels the corresponding Landau levels appearing in the presence of
a uniform magnetic field. Performing the remaining determinant over
the coordinate space in (\ref{NE9b}) leads to the effective
(thermodynamic) potential $\Omega_{\mbox{\tiny{eff}}}^{(1)}$ defined
by $\Omega_{\mbox{\tiny{eff}}}^{(1)}\equiv
-{\cal{V}}^{-1}\Gamma_{\mbox{\tiny{eff}}}^{(1)}$, where the factor
${\cal{V}}$ denotes the four-dimensional space-time volume. The
final form of $\Omega_{\mbox{\tiny{eff}}}^{(1)}$ is then determined
in the momentum space, where the effect of finite temperature and
chemical potential is introduced by replacing $p_{0}$ in
(\ref{NE9b}) with $p_{0}=i\omega_{n}-\mu$. Here, the Matsubara
frequencies $\omega_{n}$ are defined by $\omega_{n}=(2n+1)\pi T$.
Using the standard replacement
\begin{eqnarray}\label{NE12b}
\lefteqn{\int\frac{d^4
p}{(2\pi)^4}f\left(p_{0},\bar{\mathbf{p}}\right)}\nonumber\\
&=&\frac{|qeB|}{\beta}\sum_{n=-\infty}^{+\infty}\sum\limits_{p=0}^{+\infty}\alpha_{p}\int
_{-\infty}^{+\infty}\frac{dp_{3}}{8\pi^{2}}~f(i\omega_{n}-\mu,p,p_{3}),\nonumber\\
\end{eqnarray}
with $p$ labeling the Landau levels and $\beta\equiv T^{-1}$, and
after summing over the Matsubara frequencies $n$, the (one-loop)
effective potential of the model reads
\begin{eqnarray}\label{NE13b}
\lefteqn{\Omega_{\mbox{\tiny{eff}}}^{(1)}
=-3\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}\frac{|q
eB|}{\beta}
}\nonumber\\
&&\times
\sum^{+\infty}_{p=0}\alpha_{p}\int_{-\infty}^{+\infty}\frac{dp_{3}}{4\pi^{2}}\left\{\beta
E_{q}+\ln\left(1+e^{-\beta(E_{q}+\mu)}\right)\right.\nonumber\\
&&\left.+\ln\left(1+e^{-\beta(E_{q}-\mu)}\right)\right\}.
\end{eqnarray}
Here, $\alpha_{p}=2-\delta_{p0}$ is the spin degeneracy factor. As
it turns out, the above expression for
$\Omega_{\mbox{\tiny{eff}}}^{(1)}$ consists of a
$(T,\mu)$-independent and a $(T,\mu)$-dependent term. The
$(T,\mu)$-independent part of $\Omega_{\mbox{\tiny{eff}}}^{(1)}$ is
divergent and is to be appropriately regulated. In the Appendix, we
have followed the method presented in \cite{providencia2008}, and
shown that the $(T,\mu)$-independent part of
$\Omega_{\mbox{\tiny{eff}}}^{(1)}$ is given by (\ref{appB13}).
Adding this part to the tree level part of the effective potential,
(\ref{NE6b}), as well as to the $(T,\mu)$-dependent part of
$\Omega_{\mbox{\tiny{eff}}}^{(1)}$, we arrive at the final
expression for the one-loop effective potential of a two-flavor NJL
model at finite $(T,\mu)$ and in the presence of a uniform magnetic
field aligned in the third direction
\begin{widetext}
\begin{eqnarray}\label{NE14b}
\lefteqn{\hspace{-0.8cm}\Omega_{\mbox{\tiny{eff}}}(m;T,\mu,eB)=\frac{\sigma^{2}}{4G}+\frac{B^{2}}{2}-\frac{3}{2\pi^{2}}\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|^{2}\left\{\zeta'\left(-1,x_{q}\right)+\frac{x_{q}^{2}}{4}+\frac{x_{q}}{2}(1-x_{q})\ln
x_{q}\right\}
}\nonumber\\
&&
+\frac{3}{4\pi^{2}}\left\{m^{4}\ln\left(\frac{\Lambda+\sqrt{\Lambda^{2}+m^{2}}}{m}\right)-\Lambda(2\Lambda^{2}+m^{2})\sqrt{\Lambda^{2}+m^{2}}\right\}\nonumber\\
&&-3\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}\frac{|q
eB|}{\beta}\sum^{+\infty}_{p=0}\alpha_{p}\int_{-\infty}^{+\infty}\frac{dp_{3}}{4\pi^{2}}\left
\{\ln\left(1+e^{-\beta(E_{q}+\mu)}\right)+\ln\left(1+e^{-\beta(E_{q}-\mu)}\right)\right\}.
\end{eqnarray}
\end{widetext}
Here, $x_{q}\equiv \frac{m^{2}}{2|qeB|}$, $\Lambda$ is an
appropriate ultraviolet (UV) momentum cutoff,
$\zeta'(-1,x_{q})\equiv \frac{d\zeta(s,x_{q})}{ds}\big|_{s=-1}$ and
$E_{q}$ is given in (\ref{NE10b}). In Sec. \ref{sec5}, after fixing
a number of free parameters, such as the coupling $G$ and the UV
cutoff $\Lambda$, the global minima of
$\Omega_{\mbox{\tiny{eff}}}(m;T,\mu,eB)$ will be determined
numerically. They will be then used to determine the squared mass
matrices $M_{\sigma}^{2}$ and $M_{{\pi}^{0}}^{2}$ and the form
factors (kinetic coefficients) ${\cal{G}}^{\mu\nu}$ and
$({\cal{F}}^{\mu\nu})_{33}$, corresponding to the neutral mesons
$\sigma$ and $\pi^{0}$, at finite $(T,\mu)$ and $eB$.
\section{Effective kinetic part of the one-loop effective action of a two-flavor NJL model at finite $(T,\mu,eB)$}\label{sec4}
\setcounter{equation}{0}
\par\noindent
In the previous section, the one-loop effective potential of a
magnetized two-flavor NJL model at finite $(T,\mu)$ is computed by
evaluating the trace operation in (\ref{NE7b}) for a fixed field
configuration $
\Phi_{0}=(\sigma_{0},\vec{\pi}_{0})=(\mbox{const.},{\mathbf{0}})$,
which is supposed to minimize the one-loop effective potential
(\ref{NE14b}) of the model. In the next two sections, we will
compute the squared meson mass matrices and form factors of the
effective kinetic part of the one-loop effective action
corresponding to neutral mesons $\sigma$ and $\pi^{0}$. This
computation includes an analytical and a numerical part. In this
section, after reformulating the general derivation presented in
Sec. \ref{sec2}, and making it compatible with our case of
magnetized two-flavor NJL model, we will present the analytical
results of the squared mass matrices $(M_{\sigma}^{2},
M_{\pi^{0}}^{2})$ and form factors $({\cal{G}}^{\mu\nu},
{\cal{F}}^{\mu\nu})$ for neutral mesons up to a one-dimensional
integration over $p_{3}$-momentum and a summation over Landau levels
$p$. They shall be performed numerically. The results of the
numerical computation will be presented in Sec. \ref{sec5}, where we
explore the $(T,\mu,eB)$ dependence of $(M_{\sigma}^{2},
M_{\pi^{0}}^{2})$ and $({\cal{G}}^{\mu\nu}, {\cal{F}}^{\mu\nu})$.
Using these quantities the pole and screening masses of free neutral
mesons and their directional refraction indices will be determined
for various $(T,\mu,eB)$.
\par
As we have described in Sec. \ref{sec2}, our goal is to bring the
effective action of a two-flavor NJL model including
$(\sigma,\vec{\pi})$ mesons, in the form
\begin{eqnarray}\label{ND1b}
\lefteqn{\hspace{-0.6cm}\Gamma_{\mbox{\tiny{eff}}}[\sigma,\vec{\pi}]=\Gamma_{\mbox{\tiny{eff}}}[\sigma_{0}]}\nonumber\\
&&\hspace{-0.8cm}-\frac{1}{2}\int
d^{d}x~\bar{\sigma}(x)\left(M_{\sigma}^{2}+{\cal{G}}^{\mu\mu}\partial_{\mu}^{2}\right)\bar{\sigma}(x)\nonumber\\
&&\hspace{-0.8cm}-\frac{1}{2}\sum\limits_{\ell=1}^{3}\int
d^{d}x~\bar{\pi}_{\ell}(x)\left(M_{\vec{\pi}}^{2}+
{\cal{F}}^{\mu\mu}\partial_{\mu}^{2}\right)_{\ell\ell}\bar{\pi}_{\ell}(x),
\end{eqnarray}
which is valid in a truncation of the derivative expansion of the
full effective action $\Gamma_{\mbox{\tiny{eff}}}[\sigma,\vec{\pi}]$
up to two derivatives. According to (\ref{NN8}), the squared mass
matrices of neutral mesons, $\sigma$ and $\pi^{0}$, are given
by\footnote{Here, the third component of $\vec{\pi}$ is identified
with $\pi^{0}$, i.e. $\pi_{3}=\pi^{0}$.}
\begin{eqnarray}\label{ND2b}
M_{\sigma}^{2}&\equiv&-\int
d^{4}z\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}}{\delta\sigma(0)\delta\sigma(z)}\bigg|_{(\sigma_{0},{\mathbf{0}})},\nonumber\\
\hspace{-0.5cm}(M_{\vec{\pi}}^{2})_{33}&\equiv&-\int
d^{4}z\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}}{\delta\pi_{3}(0)\delta\pi_{3}(z)}\bigg|_{(\sigma_{0},{\mathbf{0}})},
\end{eqnarray}
and, according to (\ref{NN13}), the form factors of the effective
kinetic part of the effective action, corresponding to $\sigma$ and
$\pi^{0}$, read
\begin{eqnarray}\label{ND3b}
{\cal{G}}^{\mu\nu}&\equiv&-\frac{1}{2}\int
d^{4}z z^{\mu}z^{\nu}\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}^{k}}{\delta\sigma(0)\delta\sigma(z)}\bigg|_{(\sigma_{0},{\mathbf{0}})},\nonumber\\
({\cal{F}}^{\mu\nu})_{33}&\equiv&-\frac{1}{2}\int d^{4}z
z^{\mu}z^{\nu}\frac{\delta^{2}\Gamma_{\mbox{\tiny{eff}}}^{k}}{\delta\pi_{3}(0)\delta\pi_{3}(z)}\bigg|_{
(\sigma_{0},{\mathbf{0}})}\hspace{-0.7cm}.
\end{eqnarray}
To simplify our notations, we will denote in the rest of this paper,
the mass squared matrix $(M_{\vec{\pi}}^{2})_{33}$ from (\ref{ND2b})
corresponding to $\pi^{0}$ by $M_{\pi^{0}}^{2}$. Similarly,
$({\cal{F}}^{\mu\nu})_{33}$ will be denoted by ${\cal{F}}^{\mu\nu}$.
Whereas the meson squared mass matrices at zero temperature and
chemical potential are given by plugging the effective action
(\ref{NE5b})-(\ref{NE8b}) in (\ref{ND2b}) and read
\begin{eqnarray}
M_{\sigma}^{2}&=&\frac{1}{2G}-i\int
d^{4}z\mbox{tr}_{sfc}\big[S_{Q}(z,0)S_{Q}(0,z)\big],\label{ND4b}\\
\hspace{-0.5cm}M_{\pi^{0}}^{2}&=&\frac{1}{2G}+i\int
d^{4}z\mbox{tr}_{sfc}\big[S_{Q}(z,0)\tau_{3}\gamma^{5}S_{Q}(0,z)\gamma^{5}\tau_{3}\big],\nonumber\\
\label{ND5b}
\end{eqnarray}
the form factors (\ref{ND3b}) arise by replacing
$\Gamma_{\mbox{\tiny{eff}}}^{k}$ with the one-loop effective
action $\Gamma_{\mbox{\tiny{eff}}}^{(1)}$ from
(\ref{NE6b})-(\ref{NE8b}),
\begin{eqnarray}
{\cal{G}}^{\mu\nu}&=&-\frac{i}{2}\int
d^{4}z z^{\mu}z^{\nu}\mbox{tr}_{sfc}\big[S_{Q}(z,0)S_{Q}(0,z)\big],\label{ND6b}\\
{\cal{F}}^{\mu\nu}&=&\frac{i}{2}\int
d^{4}z z^{\mu}z^{\nu}\mbox{tr}_{sfc}\big[S_{Q}(z,0)\tau_{3}\gamma^{5}S_{Q}(0,z)\gamma^{5}\tau_{3}\big].\nonumber\\
\label{ND7b}
\end{eqnarray}
Similar expressions for ${\cal{G}}^{\mu\nu}$ and
${\cal{F}}^{\mu\nu}$ are also presented in
\cite{miransky1995,sadooghi2009} for a single-flavor NJL model. To
study the effect of very strong magnetic fields, the authors of
\cite{miransky1995, sadooghi2009} use the fermion propagator,
arising from Schwinger proper-time method \cite{schwinger1960}, in
the LLL approximation. In the present paper, however, we are
interested in the full $eB$ dependence of these coefficients for the
whole range of $eB\in [0,1]$ GeV$^{2}$, and have to consider, in
contrast to \cite{miransky1995, sadooghi2009}, the contributions of
higher Landau levels too. To do this, we use the Ritus fermion
propagator
\begin{eqnarray}\label{ND8b}
S_{Q}(x,y)&=&i\sum_{p=0}^{\infty}\int{\cal{D}}\tilde{p}~e^{-i\tilde{p}\cdot
(x-y)}\nonumber\\
&&\times P_{p}(x_{1})D_{Q}^{-1}(\bar{p})~P_{p}(y_{1}),
\end{eqnarray}
arising from the solution of Dirac equation in the presence of
uniform magnetic field using Ritus eigenfunction method. The same
expression for $S_{Q}(x,y)$ appears also in \cite{fukushima2009}. In
(\ref{ND8b}), $\tilde{p}\equiv (p_{0},0,p_{2},p_{3})$,
${\cal{D}}\tilde{p}\equiv \frac{dp_{0}dp_{2}dp_{3}}{(2\pi)^{3}}$,
and $P_{p}(x_{1})$ is given by
\begin{eqnarray}\label{ND9b}
\hspace{-0.2cm}P_{p}(x_{1})&=&\frac{1}{2}[f_{p}^{+s}(x_{1})+\Pi_{p}f_{p}^{-s}(x_{1})]
\nonumber\\
&&\hspace{-0.2cm}+\frac{is}{2}[f_{p}^{+s}(x_{1})-\Pi_{p}f_{p}^{-s}(x_{1})]
\gamma^{1}\gamma^{2},
\end{eqnarray}
where $s\equiv \mbox{sgn}(QeB)$, and $\Pi_{p}\equiv 1-\delta_{p0}$
accounts for the spin degeneracy in the LLL. The functions $f_{p}^{\pm
s}(x_{1})$ are defined by
\begin{eqnarray}\label{ND10b}
\begin{array}{rclcrcl}
f_{p}^{+s}(x_{1})&=&\phi_{p}\left(x_{1}-sp_{2}\ell_{B}^{2}\right),&&
p&=&0,1,2,\cdots,\nonumber\\
f_{p}^{-s}(x_{1})&=&\phi_{p-1}\left(x_{1}-sp_{2}\ell_{B}^{2}\right),&&
p&=&1,2,3,\cdots,
\end{array}
\hspace{-0.2cm}\nonumber\\
\end{eqnarray}
where $\phi_{p}(x)$ is a function of Hermite polynomials $H_{p}(x)$
in the form
\begin{eqnarray}\label{ND11b}
\phi_{p}(x)=a_{p}\exp\left(-\frac{x^{2}}{2\ell_{B}^{2}}\right)H_{p}\left(\frac{x}{\ell_{B}}\right).
\end{eqnarray}
Here, $a_{p}\equiv (2^{p}p!\sqrt{\pi}\ell_{B})^{-1/2}$ is the
normalization factor and $\ell_{B}\equiv |QeB|^{-1/2}$ is the
magnetic length. In (\ref{ND8b}),
$D_{Q}(\bar{p})\equiv\gamma\cdot\bar{p}_{Q}-m$, with the Ritus
four-momentum from (\ref{NE11b}). Note that since $Q$ is a $2\times
2$ matrix in the flavor space, $f_{p}^{\pm s}$ and therefore
$P_{p}(x_{1})$ are matrices in the flavor space. In what follows, we
will first determine $(M_{\sigma}^{2}, M^{2}_{\pi^{0}})$ and
$({\cal{G}}^{\mu\nu}, {\cal{F}}^{\mu\nu})$ at zero $(T,\mu)$ and in
the presence of a constant magnetic field. We then introduce $T$ and
$\mu$ using standard replacements
\begin{eqnarray}\label{ND12b}
p_{0}=i(2n+1)\pi T-\mu,~~\mbox{and}~~\int\frac{dp_{0}}{2\pi}\to
iT\sum_{n},\nonumber\\
\end{eqnarray}
and present the result for $(M_{\sigma}^{2}, M^{2}_{\pi^{0}})$ and
$({\cal{G}}^{\mu\nu}, {\cal{F}}^{\mu\nu})$ at finite $(T,\mu,eB)$ up
to an integration over $p_{3}$-momentum and a summation over Landau
levels $p$.
\subsection{$(M_{\sigma}^{2}, M_{\pi^{0}}^{2})$ at finite $(T,\mu,eB)$}\label{sec4p1}
\subsubsection{$M_{\sigma}^{2}$ at finite $(T,\mu,eB)$}
\par\noindent
To compute $M_{\sigma}^{2}$ from (\ref{ND4b}), we use the definition
of the Ritus fermion propagator (\ref{ND8b}), and arrive first at
\begin{eqnarray}\label{ND13b}
\lefteqn{M_{\sigma}^{2}=\frac{1}{2G}+i\sum_{q}\int
d^{4}z\sum\limits_{p,k=0}^{\infty}{\cal{D}}\tilde{p}~{\cal{D}}\tilde{k}~e^{-iz\cdot(\tilde{p}-\tilde{k})}
}\nonumber\\
&&
\times\mbox{tr}_{sc}\left(D_{q}^{-1}(\bar{p})P_{p}(0)K_{k}(0)D^{-1}_{q}(\bar{k})K_{k}(z_{1})P_{p}(z_{1})\right).
\nonumber\\
\end{eqnarray}
Here, the summation over $q\in\{\frac{2}{3},-\frac{1}{3}\}$ replaces
the trace in the flavor space, and in $D_{q}$, $q$ are the
eigenvalues of the charge matrix $Q=\mbox{diag}(2/3,-1/3)$. After
performing the integration over $z_{i}, i=0,2,3$, and using the
definition of $D_{q}^{-1}$ as well as the Ritus-momentum
(\ref{NE11b}), with $Q$ replaced by $q$, we get
\begin{eqnarray}\label{ND14b}
\lefteqn{M_{\sigma}^{2}=\frac{1}{2G}+3i\sum_{q}\sum\limits_{p,k=0}^{\infty}\int
\frac{dp_{0}dp_{3}}{(2\pi)^{3}}
}\nonumber\\
&&\times\int
dp_{2}\mbox{tr}_{s}\bigg[\frac{1}{\gamma\cdot\bar{p}_{q}-m}I_{pk}(p_{2},k_{2})\frac{1}{\gamma\cdot\bar{k}_{q}-m}
\nonumber\\
&&\qquad\qquad\times J^{(0)}_{kp}(k_{2},p_{2})
\bigg]\bigg|_{\tilde{k}=\tilde{p}},
\end{eqnarray}
where the factor $3$ behind the integral arises from the trace in
the color space using $\mbox{tr}_{c}(\mathbb{I}_{N_{c}\times
N_{c}})=3$, and two functions $I_{pk}$ and $J_{kp}^{(0)}$ in
(\ref{ND14b}) are given by
\begin{eqnarray}\label{ND15b}
I_{pk}(p_{2},k_{2})&\equiv& P_{p}(0)K_{k}(0),\nonumber\\
J_{kp}^{(0)}(k_{2},p_{2})&\equiv& \int
dz_{1}K_{k}(z_{1})P_{p}(z_{1}).
\end{eqnarray}
Here, $K_{k}(x_{1})$ is defined similar to $P_{p}(x_{1})$ from
(\ref{ND9b})
\begin{eqnarray}\label{ND16b}
\hspace{-0.5cm}K_{k}(x_{1})&=&\frac{1}{2}[g_{k}^{+s}(x_{1})+\Pi_{k}g_{k}^{-s}(x_{1})]\nonumber\\
&&\hspace{-0.3cm}+\frac{is}{2}[g_{k}^{+s}(x_{1})-\Pi_{k}g_{k}^{-s}(x_{1})]
\gamma^{1}\gamma^{2},
\end{eqnarray}
with $g^{\pm s}_{k}(x_{1})$ defined as in (\ref{ND10b}), with
$p_{2}$ replaced by $k_{2}$. Note that for $k_{2}=p_{2}$, which is
included in the condition $\tilde{k}=\tilde{p}$ in (\ref{ND14b}), we
have $g^{\pm s}_{k}|_{k_{2}=p_{2}}=f^{\pm s}_{k}$. In what follows,
we will first evaluate the integration over $z_{1}$ in
(\ref{ND15b}). Using then the orthonormality of the Hermite
polynomials appearing in $P_{p}$ from (\ref{ND9b}), the
$p_{2}$-integration can also be performed. We will eventually end
up with an expression for $M_{\sigma}^{2}$ that includes only two
integrations over $p_{0}$ and $p_{3}$ momenta. To start, let us
first rewrite $I_{pk}$ and $J_{kp}^{(0)}$ from (\ref{ND15b}) using
the definition of $P_{p}(x_{1})$ from (\ref{ND9b}) and
(\ref{ND16b}). We get
\begin{eqnarray}\label{ND17b}
I_{pk}(p_{2},k_{2})&\equiv&
\alpha^{+}_{pk}(p_{2},k_{2})+is\gamma^{1}\gamma^{2}\alpha^{-}_{pk}(p_{2},k_{2}),\nonumber\\
J_{kp}^{(0)}(k_{2},p_{2})&\equiv&
A^{+(0)}_{kp}(k_{2},p_{2})+is\gamma^{1}\gamma^{2}A^{-(0)}_{kp}(k_{2},p_{2}),\nonumber\\
\end{eqnarray}
where
\begin{eqnarray}\label{ND18b}
\alpha_{pk}^{\pm}(p_{2},k_{2})&\equiv&\frac{1}{2}[f_{p}^{+s}(0)g_{k}^{+s}(0)\pm\Pi_{p}\Pi_{k}
f^{-s}_{p}(0)g^{-s}_{k}(0)], \nonumber\\
\lefteqn{\hspace{-1.6cm}A^{\pm(0)}_{kp}(k_{2},p_{2})
}\nonumber\\
&&\hspace{-2cm}\equiv\frac{1}{2}\int
dz_{1}[f_{p}^{+s}(z_{1})g_{k}^{+s}(z_{1})\pm\Pi_{p}\Pi_{k}
f^{-s}_{p}(z_{1})g^{-s}_{k}(z_{1})].\nonumber\\
\end{eqnarray}
In this way, the integration over $z_{1}$ in (\ref{ND15b}) reduces
to an integration over $z_{1}$ in $A^{\pm(0)}_{kp}(k_{2},p_{2})$.
The latter can be performed using
\begin{eqnarray}\label{ND19b}
\lefteqn{\hspace{-0.6cm}\int
dz_{1}f_{p}^{+s}(z_{1})g^{+s}_{k}(z_{1})
}\nonumber\\
&&\hspace{-1cm}=\frac{(-1)^{p}2^{k}a^{k-p}e^{-a^{2}}}{\sqrt{2^{k+p}
k! p!}}U\left(-p,1+k-p,2a^{2}\right),
\end{eqnarray}
where $a\equiv \frac{\ell_{B}(p_{2}-k_{2})}{2}$ and
$\ell_{B}=|qeB|^{-1/2}$, and $U(m,n,z)$ is the confluent
hypergeometric function of the second kind \cite{gradshteyn}. This
can, however, be simplified by implementing the condition
$k_{2}=p_{2}$, which is required in (\ref{ND14b}). In this case $a$
vanishes, and (\ref{ND19b}) therefore reduces to
\begin{eqnarray}\label{ND20b}
\int
dz_{1}f_{p}^{+s}(z_{1})g^{+s}_{k}(z_{1})\bigg|_{k_{2}=p_{2}}=\delta_{pk}.
\end{eqnarray}
Plugging this result in (\ref{ND18b}) and using
$\Pi_{p}^{2}=\Pi_{p}$, we arrive at
\begin{eqnarray}\label{ND21b}
A_{kp}^{\pm(0)}(p_{2},k_{2}=p_{2})=\frac{1}{2}\left(1\pm\Pi_{p}\right)\delta_{pk}.
\end{eqnarray}
Plugging further (\ref{ND17b}) in (\ref{ND14b}), and performing the
traces over the $\gamma$-matrices, using
$\mbox{tr}_{s}(\gamma_{\mu}\gamma_{\nu})=4g_{\mu\nu}$ and
$\mbox{tr}_{s}\left(\gamma_{\mu}\gamma_{\nu}\gamma_{\rho}\gamma_{\sigma}\right)=
4\left(g_{\mu\nu}g_{\rho\sigma}-g_{\mu\rho}g_{\nu\sigma}+g_{\mu\sigma}g_{\nu\rho}\right)$,
the $\sigma$-meson squared mass matrix is given by
\begin{eqnarray}\label{ND22b}
\lefteqn{M_{\sigma}^{2}=\frac{1}{2G}+12i\sum_{q}\sum\limits_{p,k=0}^{\infty}\int
\frac{dp_{0}dp_{3}}{(2\pi)^{3}}
}\nonumber\\
&&\times\int
dp_{2}\left\{\frac{\left(\alpha_{pk}^{+}A_{kp}^{+(0)}+\alpha_{pk}^{-}A_{kp}^{-(0)}\right)\left(m^{2}+\bar{p}_{q}\cdot
\bar{k}_{q}\right)}{(\bar{p}^{2}_{q}-m^{2})(\bar{k}^{2}_{q}-m^{2})}\right.\nonumber\\
&&~~~~\left.+\frac{2\bar{p}_{2}\bar{k}_{2}\alpha_{pk}^{-}A_{kp}^{-(0)}}{(\bar{p}^{2}_{q}-m^{2})(\bar{k}^{2}_{q}-m^{2})}\right\}
\bigg|_{\tilde{p}=\tilde{k}}.
\end{eqnarray}
Here, $\bar{p}_{q}^{2}=p_{0}^{2}-2|qeB|p-p_{3}^{2}$ and for
$\tilde{p}=\tilde{k}$,
$\bar{k}_{q}^{2}=p_{0}^{2}-2|qeB|k-p_{3}^{2}$. To perform the
integration over $p_{2}$, we first compute
\begin{eqnarray}\label{ND23b}
W_{pk}^{(0)}\equiv\int dp_2 f_p^{+s}(0)f_k^{+s}(0).
\end{eqnarray}
This can be done using the definition of $f^{+s}_{p}(0)$ in terms of
Hermite polynomials [see (\ref{ND10b}) and (\ref{ND11b})], and their
orthonormality relation
\begin{eqnarray}\label{ND24b}
\int_{-\infty}^{+\infty}d\ell~
e^{-\ell^{2}}H_{p}(\ell)H_{k}(\ell)=\frac{\delta_{pk}}{\ell_{B}a_{k}^{2}},
\end{eqnarray}
leading to
\begin{eqnarray}\label{ND25b}
W_{pk}^{(0)}&=&\frac{a_p a_k}{\ell_B}(-1)^{p+k}\int dp'_2 e^{-
p'^2_2}
H_p(p'_2)H_k(p'_2)\nonumber\\
&=&\frac{\delta_{pk}}{\ell_B^2},
\end{eqnarray}
with $p'_{2}\equiv \ell_{B}p_{2}$. Moreover, we arrive at the useful
relation
\begin{eqnarray}\label{ND26b}
\hspace{-3.5cm}\int
dp_{2}\alpha^{\pm}_{pk}(p_{2},k_{2})A_{kp}^{\pm(0)}(k_{2},p_{2})\big|_{k_{2}=p_{2}}
\nonumber\\
\hspace{2.5cm}=\frac{\delta_{pk}}{4\ell_{B}^{2}}\left(1\pm\Pi_{p}\right)^{2},
\end{eqnarray}
arising from (\ref{ND23b}). Plugging these results in (\ref{ND22b})
and summing over $k$, the $\sigma$-meson squared mass matrix at zero
temperature, chemical potential and non-vanishing magnetic field is
given by
\begin{eqnarray}\label{ND27b}
\lefteqn{M_{\sigma}^{2}=\frac{1}{2G}
}\nonumber\\
&&+6i\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|\sum\limits_{p=0}^{\infty}\alpha_{p}\int
\frac{dp_{0}dp_{3}}{(2\pi)^{3}}~\frac{(\bar{p}_{q}^{2}+m^{2})}{(\bar{p}_{q}^{2}-m^{2})^{2}},\nonumber\\
\end{eqnarray}
where $\alpha_{p}\equiv 1+\Pi_{p}$ is the same spin degeneracy
factor that appears in (\ref{NE14b}). To introduce the temperature
$T$ and the chemical potential $\mu$, we use the method described at
the beginning of this section [see (\ref{ND12b})]. The mass squared
matrix corresponding to $\sigma$-meson at finite $(T,\mu,eB)$ is
therefore given by
\begin{eqnarray}\label{ND28b}
\lefteqn{\hspace{-1cm}M_{\sigma}^{2}=\frac{1}{2G}-6\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|
}\nonumber\\
&&\hspace{-1.2cm}\times
\sum_{p=0}^{\infty}\alpha_{p}\int\frac{dp_{3}}{(2\pi)^{2}}
\big[{\cal{S}}_{1}^{(0)}(\omega_{p})+2m^{2}{\cal{S}}_{2}^{(0)}(\omega_{p})\big],
\end{eqnarray}
where $\omega_{p}^{2}\equiv p_{3}^{2}+2|qeB|p+m^{2}$, and
${\cal{S}}_{\ell}^{(0)}(\omega_{p}), \ell=1,2$ are defined by
\begin{eqnarray}\label{ND29b}
{\cal{S}}_{\ell}^{(m)}(\omega_{p})\equiv
T\sum\limits_{n=-\infty}^{+\infty}\frac{(p_{0}^{2})^{m}}{(p_{0}^{2}-\omega_{p}^{2})^{\ell}},
\end{eqnarray}
with $\ell\geq 1, m\geq 0$. Using
\begin{eqnarray}\label{ND30b}
{\cal{S}}_{1}^{(0)}(\omega_{p})=
\frac{1}{2\omega_{p}}[1-N_{f}(\omega_{p})],
\end{eqnarray}
and assuming that ${\cal{S}}_{0}^{(m)}=0, \forall m\geq 0$, the
following recursion relations can be used to evaluate
${\cal{S}}_{\ell}^{(m)}(\omega_{p})$ from (\ref{ND29b}) for all
$\ell\geq 1$ and $m\geq 0$,
\begin{eqnarray}\label{ND31b}
{\cal{S}}_{\ell}^{(0)}(\omega_{p})&=&\frac{1}{2(\ell-1)\omega_{p}}\frac{d{\cal{S}}_{\ell-1}^{(0)}(\omega_{p})}
{d\omega_{p}},~~~\forall\ell\geq 2,\nonumber\\
{\cal{S}}_{\ell}^{(m)}(\omega_{p})&=&{\cal{S}}_{\ell-1}^{(m-1)}(\omega_{p})+\omega_{p}^{2}{\cal{S}}_{\ell}^{(m-1)}(\omega_{p}).
\end{eqnarray}
In (\ref{ND30b}), $N_{f}(\omega_{p})\equiv
n_{f}^{+}(\omega_{p})+n_{f}^{-}(\omega_{p})$ and
$n_{f}^{\pm}(\omega_{p})$ are fermionic distribution functions
\begin{eqnarray}\label{ND32b}
n_{f}^{\pm}(\omega_{p})\equiv
\frac{1}{e^{\beta(\omega_{p}\mp\mu)}+1}.
\end{eqnarray}
In the following paragraph, the same method will be used to
determine $M_{\pi^{0}}^{2}$ at zero and nonzero $(T,\mu)$ and for
non-vanishing $eB$.
\subsubsection{$M_{\pi^{0}}^{2}$ at finite $(T,\mu,eB)$}
\par\noindent
To determine the squared mass matrix $M_{\pi^{0}}^{2}$ from
(\ref{ND5b}), corresponding to $\pi^{0}$, we use the definition of
the fermion propagator (\ref{ND8b})-(\ref{ND9b}), and arrive first
at
\begin{eqnarray}\label{ND33b}
\lefteqn{M_{\pi^{0}}^{2}=\frac{1}{2G} -i\int
d^{4}z\sum\limits_{p,k=0}^{\infty}\int{\cal{D}}\tilde{p}~{\cal{D}}\tilde{k}~e^{-iz\cdot(\tilde{p}-\tilde{k})}
}\nonumber\\
&&\times
~\mbox{tr}_{sfc}\bigg[D_{Q}^{-1}(\bar{p})P_{p}(0)\tau_{3}\gamma_{5}K_{k}(0)D^{-1}_{Q}(\bar{k})\nonumber\\
&&\hspace{1.5cm}\times K_{k}(z_{1})
\gamma_{5}\tau_{3}P_{p}(z_{1})\bigg].
\end{eqnarray}
Using the anticommutation relation $\{\gamma_{5},\gamma_{\mu}\}=0$
leading to $[\gamma_{5},K_{k}]=0$, we simplify first the combination
$\gamma_{5}K_{k}(0)D^{-1}_{Q}(\bar{k})K_{k}(z_{1})\gamma_{5}$ in
(\ref{ND33b}), and arrive at
\begin{eqnarray}\label{ND34b}
\gamma_{5}K_{k}(0)D^{-1}_{Q}(\bar{k})K_{k}(z_{1})\gamma_{5}
=-K_{k}(0)\frac{1}{\gamma\cdot \bar{k}+m}K_{k}(z_{1}). \hspace{-0.5cm}\nonumber\\
\end{eqnarray}
Plugging this relation in (\ref{ND33b}) and performing the
integration over $z_{i}, i=0,2,3$, we arrive at
\begin{eqnarray}\label{ND35b}
\lefteqn{M_{\pi^{0}}^{2}=\frac{1}{2G}+3i
}\nonumber\\
&&\times\sum\limits_{p,k=0}^{\infty}\int
\frac{dp_{0}dp_{3}}{(2\pi)^{3}}\int
dp_{2}~\mbox{tr}_{fs}\left\{\frac{1}{\gamma\cdot\bar{p}-m}I_{pk}(p_{2},k_{2})\right.\nonumber\\
&&\left.\times \tau_{3}
\frac{1}{\gamma\cdot\bar{k}+m}\tau_{3}J^{(0)}_{kp}(k_{2},p_{2})
\right\}\bigg|_{\tilde{k}=\tilde{p}},
\end{eqnarray}
where $I_{pk}(p_{2},k_{2})$ and $J_{kp}^{(0)}(k_{2},p_{2})$ are
given in (\ref{ND15b}). We follow the same method leading from
(\ref{ND14b}) to (\ref{ND27b}) to evaluate the traces over the
$\gamma$-matrices and to perform the integrations over $z_{1}$ and
$p_{2}$ in (\ref{ND35b}). We arrive after a lengthy but
straightforward computation at
\begin{eqnarray}\label{ND36b}
M_{\pi^{0}}^{2}&=&\frac{1}{2G}+6i\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}
|qeB|\nonumber\\
&&\times\sum\limits_{p=0}^{\infty}\alpha_{p}\int\frac{dp_{0}dp_{3}}{(2\pi)^{3}}\frac{1}{(\bar{p}_{q}^{2}-m^{2})}.
\end{eqnarray}
Thus, the mass squared matrix corresponding to $\pi^{0}$ at finite
$(T,\mu,eB)$ is given by
\begin{eqnarray}\label{ND37b}
M_{\pi^{0}}^{2}&=&\frac{1}{2G}-6\sum_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|\nonumber\\
&&\times\sum\limits_{p=0}^{\infty}\alpha_{p}
\int\frac{dp_{3}}{(2\pi)^{2}}{\cal{S}}_{1}^{(0)}(\omega_{p}),
\end{eqnarray}
where ${\cal{S}}_{1}^{(0)}(\omega_{p})$ is given in (\ref{ND30b}).
In Sec. \ref{sec5}, the integration over $p_{3}$ and the summation
over Landau level $p$, appearing in (\ref{ND37b}), will be performed
numerically.
\subsection{$({\cal{G}}^{\mu\nu}, {\cal{F}}^{\mu\nu})$ at finite $(T,\mu,eB)$}\label{sec4p2}
\subsubsection{${\cal{G}}^{\mu\nu}$ at finite $(T,\mu,eB)$}
\par\noindent
We start by computing ${\cal{G}}^{\mu\nu}$ from (\ref{ND6b}) at zero
$(T,\mu)$ but non-vanishing $eB$. To do this, we use the definition
of the Ritus propagator (\ref{ND8b}), and arrive first at
\begin{eqnarray}\label{ND38b}
\lefteqn{{\cal{G}}^{\mu\nu}=\frac{i}{2}\sum_{q}\int
d^{4}z~z^{\mu}z^{\nu}\sum\limits_{p,k=0}^{\infty}\int{\cal{D}}\tilde{p}~{\cal{D}}\tilde{k}~e^{-iz\cdot(\tilde{p}-\tilde{k})}
}\nonumber\\
&&\times\mbox{tr}_{sc}\bigg[D_{q}^{-1}(\bar{p})P_{p}(0)K_{k}(0)D^{-1}_{q}(\bar{k})K_{k}(z_{1})P_{p}(z_{1})\bigg].\nonumber\\
\end{eqnarray}
After performing the integration over $z_{i}, i=0,2,3$, and using
the definition of $D^{-1}_{q}$, the diagonal elements of
${\cal{G}}^{\mu\nu}$ are given by
\begin{eqnarray}\label{ND39b}
\lefteqn{{\cal{G}}^{jj}=-\frac{i}{2}\sum_{q}\sum_{k,r=0}^{\infty}\int
{\cal{D}}\tilde{k}~\frac{\partial^{2}}{\partial\ell_{j}^{2}}
\mbox{tr}_{sc}\bigg[\frac{1}{(\gamma\cdot
\bar{r}_{q}-m)}
}\nonumber\\
&&\times
I_{rk}(r_{2},k_{2})\frac{1}{(\gamma\cdot\bar{k}_{q}-m)}J_{kr}^{(0)}
(k_{2},r_{2})\bigg]\Bigg|_{ \tilde{\ell}=0
},\nonumber\\
\lefteqn{{\cal{G}}^{11}=+\frac{i}{2}\sum_{q}\sum_{k,p=0}^{\infty}\int
{\cal{D}}\tilde{p}~\mbox{tr}_{sc}\bigg[\frac{1}{(\gamma\cdot
\bar{p}_{q}-m)}
}\nonumber\\
&&\times I_{pk}(p_{2},k_{2})\frac{1}{(\gamma\cdot\bar{k}_{q}-m)}J_{kp}^{(2)}(k_{2},p_{2})\bigg]\Bigg|_{\tilde{k}=\tilde{p}},\nonumber\\
\lefteqn{
{\cal{G}}^{22}=-\frac{i}{2}\sum_{q}\sum_{k,p=0}^{\infty}\int
{\cal{D}}\tilde{p}~\mbox{tr}_{sc}\bigg[\frac{1}{(\gamma\cdot
\bar{p}_{q}-m)}
}\nonumber\\
&&\times\frac{\partial^{2}}{\partial
p_{2}^{2}}\bigg[I_{pk}(p_{2},k_{2})\frac{1}{(\gamma\cdot\bar{k}_{q}-m)}J_{kp}^{(0)}(k_{2},p_{2})\bigg]\bigg]
\Bigg|_{\tilde{k}=\tilde{p}}.\nonumber\\
\end{eqnarray}
In ${\cal{G}}^{jj}$, $j=0,3$, $\bar{r}_{q}\equiv
\bar{k}_{q}+\bar{\ell}_{q}$ and $r\equiv k+\ell$. Moreover, two
functions $I_{pk}(p_{2},k_{2})$ and $J^{(0)}_{kp}(k_{2},p_{2})$ are
defined in (\ref{ND15b}), and $J^{(2)}_{kp}(k_{2},p_{2})$ is defined
by
\begin{eqnarray}\label{ND40b}
J_{kp}^{(2)}(k_{2},p_{2})\equiv \int dz_{1} z_{1}^{2}
K_{k}(z_{1})P_{p}(z_{1}),
\end{eqnarray}
with $P_{p}$ and $K_{k}$ given in (\ref{ND9b}) and (\ref{ND16b}),
respectively. Following the method presented in the first part of
this section, leading from (\ref{ND14b}) to (\ref{ND27b}), all
non-diagonal elements of ${\cal{G}}^{\mu\nu}$ turn out to vanish,
and therefore, as it is claimed in Sec. \ref{sec2},
${\cal{G}}^{\mu\nu}={\cal{G}}^{\mu\mu}g^{\mu\nu}$ (no summation over
$\mu$). This is similar to what also happens in the single-flavor
NJL model \cite{miransky1995}. We therefore focus on
${\cal{G}}^{\mu\mu}, \mu=0,\cdots,3$ from (\ref{ND39b}), which shall
be evaluated using the same method as before. Evaluating the
$\bar{k}$-integration in ${\cal{G}}^{jj}, j=0,3$ from (\ref{ND39b}),
using an additional Feynman parametrization, we arrive first at
\begin{eqnarray}\label{ND41b}
\lefteqn{
{\cal{G}}^{00}=-{\cal{G}}^{33}=3i\sum\limits_{q\in\{\frac{2}{3},
-\frac{1}{3}\}}|qeB|
}\nonumber\\
&&\times\sum\limits_{p=0}^{\infty}\alpha_{p}\int
\frac{dp_{0}dp_{3}}{(2\pi)^{3}}\left\{\frac{1}{(\bar{p}_{q}^{2}-m^{2})^{2}}+
\frac{4}{3}\frac{m^{2}}{(\bar{p}_{q}^{2}-m^{2})^{3}}
\right\}.\nonumber\\
\end{eqnarray}
At finite $(T,\mu, eB)$, we therefore have
\begin{eqnarray}\label{ND42b}
\lefteqn{
{\cal{G}}^{00}=-{\cal{G}}^{33}=-3\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|}\nonumber\\
&&\times\sum_{p=0}^{\infty}\alpha_{p}\int\frac{dp_{3}}{(2\pi)^{2}}
\left\{{\cal{S}}_{2}^{(0)}(\omega_{p})
+\frac{4}{3}m^{2}{\cal{S}}_{3}^{(0)}(\omega_{p})\right\}.\nonumber\\
\end{eqnarray}
To determine ${\cal{G}}^{11}$ from (\ref{ND39b}), we shall first
evaluate the $z_{1}$ integration in $J_{kp}^{(2)}(k_{2},p_{2})$ from
(\ref{ND40b}) at $k_{2}=p_{2}$, as it is required from
(\ref{ND39b}). To do this, we first define
\begin{eqnarray}\label{ND43b}
J_{kp}^{(2)}(k_{2},p_{2})\equiv
A_{kp}^{+(2)}(k_{2},p_{2})+i\gamma^{1}\gamma^{2}sA_{kp}^{-(2)}(k_{2},p_{2}),\nonumber\\
\end{eqnarray}
with
\begin{eqnarray}\label{ND44b}
\lefteqn{\hspace{-1.3cm}A_{kp}^{\pm(2)}(k_{2},p_{2}) }\nonumber\\
&&\hspace{-1.5cm}\equiv
\frac{1}{2}\bigg[{\cal{L}}_{kp}(k_{2},p_{2})\pm
\Pi_{p}\Pi_{k}{\cal{L}}_{k-1,p-1}(k_{2},p_{2})\bigg].
\end{eqnarray}
Here, ${\cal{L}}_{kp}(k_{2},p_{2})$ is defined by
\begin{eqnarray}\label{ND45b}
\hspace{-.2cm}{\cal{L}}_{kp}(k_{2},p_{2})\equiv\int
dz_{1}~[z_{1}g_{k}^{+s}(z_{1})][z_{1}f_{p}^{+s}(z_{1})].
\end{eqnarray}
To determine ${\cal{L}}_{kp}$ for $k_{2}=p_{2}$, we use the
definition of $f^{+s}_{p}$ from (\ref{ND10b}) in terms of the
Hermite polynomials $H_{p}$ and their standard recursion relations $
\frac{d H_{k}(x)}{dx}=2k H_{k-1}(x)$ and $H_{k+1}(x)=2x H_{k}(x)-2
kH_{k-1}(x)$, to arrive first at
\begin{eqnarray}\label{ND46b}
\lefteqn{z_{1}f_{p}^{+s}(z_{1})}\nonumber\\
&&=\ell_{B}\left(C_{p+1}f_{p+1}^{+s}(z_{1})+C_{p}f_{p-1}^{+s}(z_{1})+p'_{2}f_{p}^{+s}(z_{1})\right),\nonumber\\
\end{eqnarray}
where $C_{p}\equiv \sqrt{\frac{p}{2}}$ and
$p'_{2}\equiv\ell_{B}p_{2}$. Replacing (\ref{ND46b}) in
(\ref{ND45b}), setting $k_{2}=p_{2}$, and integrating over $z_{1}$,
we get
\begin{eqnarray}\label{ND47b}
\lefteqn{{\cal{L}}_{kp}(k_{2}=p_{2},p_{2})=\ell_{B}^{2}\bigg[\left(C_{2p+1}^{2}+p_{2}^{'2}\right)\delta_{kp}
}\nonumber\\
&&+
C_{p}C_{p-1}\delta_{k,p-2}+C_{p+1}C_{p+2}\delta_{k,p+2}+2p'_{2}\left(C_{p}\delta_{k,p-1}\right.\nonumber\\
&&\left.+C_{p+1}\delta_{k,p+1}\right)\bigg].
\end{eqnarray}
Thus, $A_{kp}^{\pm(2)}(p_{2},p_{2})$ in (\ref{ND43b}) are given by
\begin{eqnarray}\label{ND48b}
\lefteqn{\hspace{-1cm}A_{kp}^{\pm(2)}(k_{2}=p_{2},p_{2})=\frac{\ell_{B}^{2}}{2}\bigg[C^{\pm}\delta_{kp}}\nonumber\\
&&\qquad+C_{p-1}\left(C_{p}\pm \Pi_{p}\Pi_{k}
C_{p-2}\right)\delta_{k,p-2}\nonumber\\
&&\qquad +C_{p+1}\left(C_{p+2}\pm
\Pi_{p}\Pi_{k}C_{p}\right)\delta_{k,p+2}
\nonumber\\
&&\qquad+2p'_{2}\left( C_{p}\pm \Pi_{p}\Pi_{k}
C_{p-1}\right)\delta_{k,p-1}\nonumber\\
&&\qquad+2p'_{2}\left(C_{p+1}\pm \Pi_{p}\Pi_{k}
C_{p}\right)\delta_{k,p+1}\bigg],
\end{eqnarray}
where the coefficients $C^{\pm}\equiv
D^{\pm}+p_{2}^{'2}(1\pm\Pi_{p})$ with $D^{\pm}\equiv C_{2p+1}^{2}\pm
C_{2p-1}^{2}\Pi_{p}$. Plugging (\ref{ND48b}) in (\ref{ND43b}) and
the resulting expression in ${\cal{G}}^{11}$ from (\ref{ND39b}), and
performing the trace over $\gamma$-matrices, we arrive at
\begin{eqnarray}\label{ND49b}
\lefteqn{{\cal{G}}^{11}=6i\sum_{q}\sum\limits_{p,k=0}^{\infty}\int
\frac{dp_{0}dp_{3}}{(2\pi)^{3}} }\nonumber\\
&&\times \int
dp_{2}\left\{\frac{\left(\alpha_{pk}^{+}A_{kp}^{+(2)}+\alpha_{pk}^{-}A_{kp}^{-(2)}\right)\left(\bar{p}_{q}\cdot
\bar{k}_{q}+m^{2}\right)}{(\bar{p}_{q}^{2}-m^{2})(\bar{k}_{q}^{2}-m^{2})}
\right.\nonumber\\
&&~~~~+\left.\frac{2\bar{p}_{2}\bar{k}_{2}\alpha_{pk}^{-}A_{kp}^{-(2)}}{(\bar{p}_{q}^{2}-m^{2})(\bar{k}_{q}^{2}-m^{2})}
\right\}\bigg|_{\tilde{k}=\tilde{p}},
\end{eqnarray}
where $\alpha^{\pm}_{pk}$ are defined in (\ref{ND18b}). The
integration over $p_{2}$ is then performed using
\begin{eqnarray}\label{ND50b}
W^{(1)}_{pk}&\equiv&\int
dp_{2}p'_{2}f_{p}^{+s}(0)f_{k}^{+s}(0)\nonumber\\
&=&-\frac{1}{\ell_{B}^{2}}\left(C_{p+1}\delta_{k,p+1}+C_{p}\delta_{k,p-1}\right),\nonumber\\
W^{(2)}_{pk}&\equiv&\int
dp_{2}p^{'2}_{2}f_{p}^{+s}(0)f_{k}^{+s}(0)\nonumber\\
&=&+\frac{1}{\ell_{B}^{2}}\left(C_{2p+1}^{2}\delta_{kp}+C_{p+2}C_{p+1}
\delta_{k,p+2}\right.\nonumber\\
&&\left.+C_{p}C_{p-1}\delta_{k,p-2}\right).
\end{eqnarray}
These results arise from the orthonormality relations of the Hermite
polynomials (\ref{ND24b}), in the same way that $W^{(0)}_{pk}$ from
(\ref{ND23b}) is derived. Using $W_{pk}^{(1)}$ and $W_{pk}^{(2)}$
from (\ref{ND50b}), we get
\begin{eqnarray}\label{ND51b}
\lefteqn{\hspace{-0.5cm}\int dp_{2}\
\alpha^{\pm}_{pk}A^{\pm(2)}_{kp}|_{\tilde{k}=\tilde{p}}=\frac{1}{4}\bigg[
\left(D^{\pm}+C_{2p+1}^{2}\pm\Pi_{p}C_{2p-1}^{2}\right)}\nonumber\\
&&\times(1\pm\Pi_{p})\delta_{kp}-2\left(C_{p}\pm\Pi_{p}\Pi_{k}C_{p-1}\right)^{2}\delta_{k,p-1}\nonumber\\
&&-2\left(C_{p+1}\pm\Pi_{p}\Pi_{k}C_{p}\right)^{2}
\delta_{k,p+1}\bigg].
\end{eqnarray}
Plugging these relations in (\ref{ND49b}), we finally arrive at
\begin{eqnarray}\label{ND52b}
\lefteqn{{\cal{G}}^{11}=3i
}\nonumber\\
&&\times
\sum\limits_{q}\sum_{p,k=0}^{\infty}\int\frac{dp_{0}dp_{3}}{(2\pi)^{3}}
\bigg\{\frac{\big(\bar{p}_{q}\cdot\bar{k}_{q}+m^{2}\big)}{(\bar{p}_{q}^{2}-m^{2})(\bar{k}_{q}^{2}-m^{2})}C^{(1)}_{pk}\nonumber\\
&&-\frac{\bar{p}_{2}\bar{k}_{2}}
{(\bar{p}_{q}^{2}-m^{2})(\bar{k}_{q}^{2}-m^{2})}C^{(2)}_{pk}
\bigg\}\bigg|_{\tilde{k}=\tilde{p}},
\end{eqnarray}
where
\begin{eqnarray}\label{ND53b}
\lefteqn{C^{(1)}_{pk}\equiv[(2p+1)+\Pi_{p}(2p-1)]\delta_{kp}}\nonumber\\
&&\hspace{-0.3cm}-[p+\Pi_{p}\Pi_{k}(p-1)]\delta_{k,p-1}-[(p+1)+\Pi_{p}\Pi_{k}p]\delta_{k,p+1},\nonumber\\
\lefteqn{C^{(2)}_{pk}\equiv-[(2p+1)-(2p-1)\Pi_{p}](1-\Pi_{p})\delta_{pk}}\nonumber\\
&&\hspace{-0.3cm}+[p+\Pi_{p}\Pi_{k}(p-1-2\sqrt{p(p-1)})]\delta_{k,p-1}\nonumber\\
&&\hspace{-0.3cm}+[(p+1)+\Pi_{p}\Pi_{k}(p-2\sqrt{p(p+1)})]\delta_{k,p+1}.
\end{eqnarray}
To determine ${\cal{G}}^{22}$ from (\ref{ND39b}), we perform the
traces over the $\gamma$-matrices and arrive first at
\begin{eqnarray}\label{ND54b}
\lefteqn{{\cal{G}}^{22}=-6i\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}\sum_{p,k=0}^{\infty}
\int \frac{dp_0 dp_3}{(2\pi)^3}
}\nonumber\\
&&\times\int
dp_2\left\{\frac{\big(\bar{p}_{q}\cdot\bar{k}_{q}+m^{2}\big)N^{(1)}_{pk}+2\bar{p}_{2}\bar{k}_{2}N^{(2)}_{pk}}
{(\bar{p}_{q}^{2}-m^{2})(\bar{k}_{q}^{2}-m^{2})}\right\}
\bigg|_{\tilde{k}=\tilde{p}},\nonumber\\
\end{eqnarray}
where
\begin{eqnarray*}
N^{(1)}_{pk}(p_{2},k_{2})&\equiv&\frac{d^{2}}{dp_{2}^{2}}\left(\alpha_{pk}^{+}A_{kp}^{+(0)}+\alpha_{pk}^{-}A_{kp}^{-(0)}\right)
,\nonumber\\
N^{(2)}_{pk}(p_{2},k_{2})&\equiv&\frac{d^{2}}{dp_{2}^{2}}\left(\alpha_{pk}^{-}A_{kp}^{-(0)}\right).
\end{eqnarray*}
Plugging the definitions of $\alpha_{pk}^{\pm}(p_{2},k_{2})$ and
$A_{kp}^{\pm(0)}(p_{2},k_{2})$ from (\ref{ND18b}) in (\ref{ND54b}),
and performing the integration over $p_{2}$ in (\ref{ND54b}) by
making use of $W^{(0)}_{pk}$ from (\ref{ND25b}), we arrive after a
lengthy but straightforward computation at
\begin{eqnarray}\label{ND55b}
\int
dp_{2}N^{(1)}_{pk}(p_{2},k_{2}=p_{2})&=&-\frac{1}{2}C_{pk}^{(1)},\nonumber\\
\int
dp_{2}N^{(2)}_{pk}(p_{2},k_{2}=p_{2})&=&\frac{1}{4}C_{pk}^{(2)},
\end{eqnarray}
where $C^{(1)}_{pk}$ and $C^{(2)}_{pk}$ are given in (\ref{ND53b}).
This leads eventually to
\begin{eqnarray}\label{ND56b}
{\cal{G}}^{22}={\cal{G}}^{11},
\end{eqnarray}
with ${\cal{G}}^{11}$ given in (\ref{ND52b}). Note that the equality
${\cal{G}}^{11}={\cal{G}}^{22}$ arises also in a single-flavor NJL
model in \cite{miransky1995}, where the form factors of the
effective kinetic term are computed at zero temperature and chemical
potential and in the regime of LLL dominance. In (\ref{ND56b}), this
regime is characterized by $k=p=0$, where $k$ and $p$ label the
Landau levels. At finite $(T,\mu)$, ${\cal{G}}^{11}={\cal{G}}^{22}$
is therefore given by
\begin{eqnarray}\label{ND57b}
\lefteqn{\hspace{-0.8cm}{\cal{G}}^{11}={\cal{G}}^{22}=-3\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}\sum_{p=0}^{\infty}
\int\frac{dp_3}{(2\pi)^{2}}
\left\{8m^{2}p{\cal{S}}_{2}^{(0)}(\omega_{p}) \right.}\nonumber\\
&&\left.
+2[(2p+1)\ell_{B}^{2}m^{2}+p]~{\cal{S}}_{1}^{(0)}(\omega_{p})\right.\nonumber\\
&&\left.-2[(2p+1)\ell_{B}^{2}m^{2}+(p+1)]~{\cal{S}}_{1}^{(0)}(\omega_{p+1})\right.\nonumber\\
&&\left.+\delta_{p0}[{\cal{S}}_{1}^{(0)}(\omega_{p})
+2m^{2}{\cal{S}}_{2}^{(0)}(\omega_{p})]\right\},
\end{eqnarray}
where ${\cal{S}}_{1}^{(0)}(\omega_{p})$ is given in (\ref{ND30b})
and ${\cal{S}}_{2}^{(0)}(\omega_{p})$ can be evaluated using the
recursion relations (\ref{ND31b}).
\subsubsection{${\cal{F}}^{\mu\nu}$ at finite $(T,\mu,eB)$}
\par\noindent
We start the computation of the elements of the matrix
${\cal{F}}^{\mu\nu}$ by considering its definition from
(\ref{ND7b}), and arrive after plugging the Ritus propagator
(\ref{ND8b}) in (\ref{ND7b}) at
\begin{eqnarray}\label{ND58b}
\lefteqn{\hspace{-0.8cm}{\cal{F}}^{\mu\nu}=-\frac{i}{2}\int
d^{4}z~z^{\mu}z^{\nu}\sum\limits_{p,k=0}^{\infty}\int{\cal{D}}\tilde{p}~{\cal{D}}\tilde{k}~e^{-iz\cdot(\tilde{p}-\tilde{k})}
}\nonumber\\
&&\times
\mbox{tr}_{sfc}\bigg[D_{Q}^{-1}(\bar{p})P_{p}(0)\tau_{3}\gamma_{5}K_{k}(0)D^{-1}_{Q}(\bar{k})\nonumber\\
&&\qquad~~~~\times K_{k}(z_{1})
\gamma_{5}\tau_{3}P_{p}(z_{1})\bigg].
\end{eqnarray}
Using (\ref{ND34b}) and following the same method as is used to
determine ${\cal{G}}^{\mu\nu}$ in the previous section, we arrive
after some work at
\begin{eqnarray}\label{ND59b}
\lefteqn{\hspace{-1cm}{\cal{F}}^{00}=-{\cal{F}}^{33}=3i\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|}\nonumber\\
&&\times
\sum\limits_{p=0}^{\infty}\alpha_{p}\int\frac{dp_{0}dp_{3}}{(2\pi)^{3}}
\frac{1}{(\bar{p}_{q}^{2}-m^{2})^{2}},
\end{eqnarray}
and
\begin{eqnarray}\label{ND60b}
\lefteqn{{\cal{F}}^{11}={\cal{F}}^{22}}\nonumber\\
&&=3i\sum\limits_{q}\sum\limits_{p,k=0}^{\infty}\int\frac{dp_{0}dp_{3}}{(2\pi)^{3}}
\bigg[\frac{(\bar{p}_{q}\cdot\bar{k}_{q}-m^{2})}{(\bar{p}_{q}^{2}-m^{2})(\bar{k}_{q}^{2}-m^{2})}C_{pk}^{(1)}\nonumber\\
&&~~~-
\frac{2|qeB|\sqrt{pk}}{(\bar{p}_{q}^{2}-m^{2})(\bar{k}_{q}^{2}-m^{2})}C_{pk}^{(2)}\bigg]\bigg|_{\tilde{k}=\tilde{p}}.
\end{eqnarray}
At finite $(T,\mu)$, ${\cal{F}}^{\mu\mu}, \mu=0,\cdots,3$ are
therefore given by
\begin{eqnarray}\label{ND61b}
\lefteqn{\hspace{0cm} {\cal{F}}^{00}=-{\cal{F}}^{33}
}\nonumber\\
&&=-3\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|\sum_{p=0}^{\infty}\alpha_{p}\int\frac{dp_{3}}{(2\pi)^{2}}~
{\cal{S}}_{2}^{(0)}(\omega_{p}),\nonumber\\
\end{eqnarray}
as well as
\begin{eqnarray}\label{ND62b}
\lefteqn{{\cal{F}}^{11}={\cal{F}}^{22}}\nonumber\\
&&\hspace{-0.3cm}=-3\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}\sum_{p=0}^{\infty}
\int\frac{dp_{3}}{(2\pi)^{2}}\bigg\{2\big[p~{\cal{S}}_{1}^{(0)}(\omega_{p})\nonumber\\
&&-
(p+1)~{\cal{S}}_{1}^{(0)}(\omega_{p+1})\big]+\delta_{p0}{\cal{S}}_{1}^{(0)}(\omega_{p})\bigg\}.
\end{eqnarray}
All non-diagonal elements of ${\cal{F}}^{\mu\nu}$ turn out to
vanish. As we have described before, the remaining
$p_{3}$-integration and the summation over Landau levels appearing
in the final results of Secs. \ref{sec4p1} and \ref{sec4p2} for the
squared mass matrices $(M_{\sigma}^{2}, M_{\pi^{0}}^{2})$ as well as
form factors (kinetic coefficients) $({\cal{G}}^{\mu\nu},
{\cal{F}}^{\mu\nu})$, will be evaluated numerically in the next
section. Using these results, the $(T,\mu,eB)$ dependence of pole
and screening masses as well as the refraction indices of neutral
mesons will be explored.
\section{Numerical Results}\label{sec5}
\par\noindent
In Sec. \ref{sec3}, we have introduced the one-loop effective action
$\Gamma_{\mbox{\tiny{eff}}}[\sigma,\vec{\pi}]$ of a two-flavor NJL
model describing the dynamics of non-interacting $\sigma$ and
$\vec{\pi}$ mesons in a hot and magnetized medium. We have then
determined the corresponding one-loop effective potential of this
model $\Omega_{\mbox{\tiny{eff}}}(m;T,\mu,eB)$, up to an integration
over $p_{3}$-momentum and a summation over Landau levels, labeled by
$p$. According to our description in Sec. \ref{sec2}, the global
minima of $\Omega_{\mbox{\tiny{eff}}}(m;T,\mu,eB)$ can be used to
determine the squared mass matrices $(M_{\sigma}^{2},
M_{\pi^{0}}^{2})$ and the coefficients of the form factors (kinetic
coefficients) $({\cal{G}}^{\mu\nu}, {\cal{F}}^{\mu\nu})$,
corresponding to neutral mesons and appearing in the effective
action (\ref{ND1b}). In Sec. \ref{sec4}, we have described the
analytical method leading to $(M_{\sigma}^{2}, M_{\pi^{0}}^{2})$ and
$({\cal{G}}^{\mu\nu}, {\cal{F}}^{\mu\nu})$ at finite $(T,\mu,eB)$.
The squared mass matrices are given in (\ref{ND28b}) as well as
(\ref{ND37b}) and the form factors in (\ref{ND57b}), (\ref{ND61b})
as well as (\ref{ND62b}). All these results are presented up to an
integration over $p_{3}$-momentum and a summation over Landau levels
$p$. In this section, we will first use the one-loop effective
potential (\ref{NE14b}), to determine numerically the
$(T,\mu,eB)$-dependence of the constituent quark mass
$m=m_{0}+\sigma_{0}$ for non-vanishing bare quark mass $m_{0}$. This
will be done in Sec. \ref{sec5p1} by keeping one of these three
parameters fixed and varying two other parameters. We then continue
to explore the complete phase portrait of our magnetized two-flavor
NJL model in the chiral limit $m_{0}\to 0$. Our results are
comparable with the results previously presented in \cite{sato1997,
inagaki2003}. Similar results are also obtained in
\cite{fayazbakhsh2010}, where the two-flavor NJL model, used in the
present paper, is considered with additional diquark degrees of
freedom to study the chiral and color-superconductivity phases in a
hot and magnetized quark matter. In Sec. \ref{sec5p2}, we will then
evaluate the above mentioned $p_{3}$-integration and the summation
over Landau levels numerically. This gives us the possibility to
study, in particular, the $T$-dependence of $(M_{\sigma}^{2},
M_{\pi^{0}}^{2})$ as well as $({\cal{G}}^{\mu\nu},
{\cal{F}}^{\mu\nu})$ for $\mu=0$ and various $eB=0,0.03, 0.2, 0.3$
GeV$^{2}$ (or equivalently $eB\simeq 0, 1.5m_{\pi}^{2},
10.5m_{\pi}^{2}, 15.7 m_{\pi}^{2}$ for $m_{\pi}=138$ MeV). As we
have described in Sec. \ref{sec1}, the magnetic fields produced in
the non-central heavy ion collisions at RHIC and LHC are estimated
to be in the order of $eB\sim 1.5 m_{\pi}^{2}$ and $eB\sim 15
m_{\pi}^{2}$ (or equivalently, $eB\sim 0.03$ GeV$^{2}$ and $eB\sim
0.3$ GeV$^{2}$, respectively) \cite{mclerran2007, skokov2010}.
Hence, our results for small values of magnetic fields (here,
$eB=0.03$ GeV$^{2}$) may be relevant for the physics of heavy ion
collisions at RHIC, while our results in the intermediate magnetic
fields (here, $eB= 0.2, 0.3$ GeV$^{2}$) seem to be relevant for the
heavy ion collision at LHC. In Sec. \ref{sec5p3}, we will finally
present a number of applications of the results presented in the
second part of this section. In particular, we will determine the
$T$-dependence of the pole mass as well as the refraction index and
screening mass of neutral mesons for $\mu=0$ and $eB=0,0.03, 0.2,
0.3$ GeV$^{2}$. To do this, we will use the corresponding dispersion
relations of $\sigma$- and $\pi^{0}$ mesons. The goal is to study
the effect of uniform magnetic fields on meson masses and refraction
indices and explore the interplay between the effects of temperature
and the external magnetic fields on these quantities. We will, in
particular, show that uniform magnetic fields induce a certain
anisotropy in the mesons refraction indices and the screening masses
in the longitudinal and transverse directions with respect to the
external magnetic field. Detailed studies on $eB$ and $\mu$
dependence of all the above physical quantities, together with other
possible applications of $(M_{\sigma}^{2}, M_{\pi^{0}}^{2})$ and
$({\cal{G}}^{\mu\nu}, {\cal{F}}^{\mu\nu})$, e.g. in studying the
mass splitting between charged pion masses will be presented
elsewhere \cite{sadooghi2012-3}.\footnote{The mass splitting between
$\pi^{+}$ and $\pi^{-}$ is recently discussed in
\cite{andersen2011-pions, anderson2012-2}, using chiral perturbation
theory in the presence of constant magnetic field.}
\subsection{Chiral condensate and complete phase portrait of a magnetized and hot two-flavor NJL model in the chiral limit}\label{sec5p1}
\setcounter{equation}{0}
\begin{figure*}[hbt]
\includegraphics[width=5.5cm,height=4cm]{fig1a-chiralmass-T-eB0-02-05-mu0.eps}
\includegraphics[width=5.5cm,height=4cm]{fig1b-chiralmass-mu-eB-0-02-05-T120.eps}
\includegraphics[width=5.5cm,height=4cm]{fig1c-chiralmass-eB-T60-120-mu0.eps}
\caption{(a) and (b) The $T$ and $\mu$ dependence of the constituent
quark mass $m=m_{0}+\sigma_{0}$ for fixed $eB=0,0.2, 0.5$ GeV$^{2}$
and for fixed $\mu=0$ and $T=120$ MeV, respectively. Here, $m_{0}\simeq
5$ MeV is the bare quark mass and $\sigma_{0}$ is the chiral
condensate. (c) The $eB$ dependence of $m$ is demonstrated for fixed
$\mu=0$ and various $T=60, 180$ and $220$ MeV.}\label{fig1}
\end{figure*}
\par\noindent
Using the thermodynamic potential from (\ref{NE14b}), we will
determine, in what follows, the chiral condensate and the complete
phase portrait of the two-flavor NJL model at finite $T,\mu$ and
$eB$. The notations and mathematical method used in this paragraph
are similar to what was previously used in \cite{fayazbakhsh2010}.
To determine the chiral condensate, we have to solve the gap
equation numerically
\begin{eqnarray}\label{NA1}
\frac{\partial\Omega_{\mbox{\tiny{eff}}}(\bar{m};T,\mu,eB)}{\partial
\bar{m}}\bigg|_{\bar{m}=m}=0.
\end{eqnarray}
Here, $\bar{m}=m_{0}+\sigma$ and $m=m_{0}+\sigma_{0}$, as are
introduced in Sec. \ref{sec3}. Our specific choice of parameters is
\cite{buballa2004}
\begin{eqnarray}\label{NA2}
\Lambda=0.6643~\mbox{GeV},~G=4.668~\mbox{GeV}^{-2},
m_{0}=5~\mbox{MeV},\nonumber\\
\end{eqnarray}
where $\Lambda$ is the UV momentum cutoff and $G$ is the NJL
(chiral) coupling constant. To perform the momentum integration over
${\mathbf{p}}$ and $p_{3}$, we have introduced, as in
\cite{fayazbakhsh2010}, smooth cutoff functions
\begin{eqnarray}\label{NA3}
f_{\Lambda}&=&\frac{1}{1+\exp\left(\frac{|{\mathbf{p}}|-\Lambda}{A}\right)},\nonumber\\
f_{\Lambda,B}^{p}&=&\frac{1}{1+\exp\left(\frac{\sqrt{p_{3}^{2}+2|qeB|p}-\Lambda}{A}\right)},
\end{eqnarray}
corresponding to integrals with vanishing and non-vanishing magnetic
fields, respectively. In $f_{\Lambda,B}^{p}$, $p$ labels the Landau
levels. Moreover, $A$ is a free parameter, which determines the
sharpness of the cutoff scheme. It is chosen to be $A=0.05\Lambda$,
with $\Lambda$ given in (\ref{NA2}). Using the above smooth cutoff
procedure, the above choice of parameters leads for vanishing
magnetic field and at $T=\mu=0$ to the constituent mass $m\simeq
308$ MeV.\footnote{For sharp UV-cutoff, $m$ turns out to be $m\simeq
300$ MeV, as expected.} Let us notice that the solutions of
(\ref{NA1}) are in general ``local'' minima of the theory. Keeping
$\sigma_{0}\neq 0$ and looking for ``global'' minima of the system
described by $\Omega_{\mbox{\tiny{eff}}}(m;T,\mu,eB)$ from
(\ref{NE14b}), it turns out that only in the regime $\mu\in[0,350]$
MeV, $T\in [0,390]$ MeV and $\tilde{e}B\in [0,0.8]$ GeV$^{2}$, the
global minima of $\Omega_{\mbox{\tiny{eff}}}$ are described by
nonzero $\sigma_{0}$. In these regimes, the chiral symmetry is
spontaneously broken by non-vanishing $\sigma_{0}$.\footnote{For
$m_{0}\neq 0$ the chiral symmetry of the original Lagrangian is
explicitly broken.} All our numerical computations in the present
section are therefore limited to these regimes. Note that because of
non-vanishing quark mass $m_{0}$, the transition from the chiral
symmetry broken phase to the normal phase is a smooth crossover (see
the descriptions below).
\par
In Fig. \ref{fig1}, the $T,\mu$ and $eB$ dependence of $m$ is
presented. In Fig. \ref{fig1}(a), the $T$-dependence of $m$ is
demonstrated for fixed $\mu=0$ and $eB=0,0.2,0.5$ GeV$^{2}$.
Although the transition from the chiral symmetry broken phase, with
$m\neq 0$, to the normal phase, with $m\simeq m_{0}\approx 0$, is a
smooth crossover, it turns out that for stronger magnetic fields
the transition to the normal phase occurs for larger values of $T$,
whereas for $eB=0$, this transition temperature into the crossover
region is smaller. Moreover, at $T\in [0,100]$ MeV, where $m$ is
almost constant, the value of $m$ increases with increasing $eB$.
All these effects are related with the phenomena of magnetic
catalysis \cite{klimenko1992, miransky1995}, according to which,
magnetic fields enhance the production of
$\sigma_{0}\sim\langle\bar{\psi}\psi\rangle$ condensate, even for
very small coupling between the fermions, and therefore catalyze the
dynamical chiral symmetry breaking. Similar effects occur also in
Fig. \ref{fig1}(b), where $m$ is plotted as a function of $\mu$, at
fixed $T=120$ MeV and for various $eB=0,0.2,0.5$ GeV$^{2}$. At
$\mu=0$, for instance, the value of $m$ increases with increasing
$eB$. In Fig. \ref{fig1}(c), the $eB$-dependence of $m$ is
demonstrated for fixed $\mu=0$ and $T=60,180$ and $220$ MeV. As it
turns out, for a fixed value of $eB$, $m$ decreases with increasing
$T$, and this ``melting'' effect persists in the
whole range of $eB\in [0, 0.8]$ GeV$^{2}$, although it is partly
compensated by the magnetic field in the regime $eB>0.6$ GeV$^{2}$.
Let us notice that, according to our results in
\cite{fayazbakhsh2010-1,fayazbakhsh2010}, for a certain threshold
magnetic field $eB_{t}\simeq 0.45$ GeV$^{2}$, the magnetic field is
strong enough and forces the dynamics of the system to be mainly
described by the LLL. In this regime, $m$ increases linearly with
increasing $eB$ [see Fig. \ref{fig1}(c)]. Later, in
\cite{rebhan2011}, the threshold magnetic field is estimated to be
in the order of $B\simeq 10^{19}$ Gau\ss. In the present paper,
however, the threshold magnetic field turns out to be $eB_{t}\geq
0.7$ GeV$^{2}$ [or equivalently $B\simeq 1.2\times 10^{20}$
Gau\ss].\footnote{The exact value of threshold magnetic field
$eB_{t}$ is determined from $\lfloor
\frac{\Lambda^{2}}{|qeB|}\rfloor=0$, where $\lfloor a\rfloor$ is the
greatest integer less than or equal to $a$. For up quark
$eB_{t}\simeq 0.67$ GeV$^{2}$ and for down quark $eB_{t}\simeq 1.33$
GeV$^{2}$.} The $T$ and $eB$ dependence of $m$ at fixed chemical
potential $\mu$ and various $eB$ and $T$ are discussed recently in
\cite{condensate-lattice} using lattice gauge theory methods in the
presence of constant (electro)magnetic fields. Our original results
from \cite{fayazbakhsh2010} as well as the results presented in
Figs. \ref{fig1}(a) and \ref{fig1}(c) are consistent with the
results arising from lattice simulations \cite{condensate-lattice}.
\begin{figure*}[hbt]
\includegraphics[width=5.5cm,height=4cm]{fig2a-phase-T-mu-eB-0-to-0.6.eps}
\includegraphics[width=5.5cm,height=4cm]{fig2b-phase-T-eB-mu-240-to-380.eps}
\includegraphics[width=5.5cm,height=4cm]{fig2c-phase-mu-eB-T-30-to-150.eps}
\caption{Complete phase portrait of a two-flavor magnetized NJL
model at finite $T$, $\mu$ and $eB$ in the chiral limit of vanishing
quark mass $m_{0}$. Blue solid (green dashed) lines
denote the second (first) order phase transition between the chiral symmetry broken and normal phases. Two branches of the first order
critical line for $\mu=320$ MeV are denoted by green double-dashed lines. }\label{fig2}
\end{figure*}
\par
According to our results in \cite{fayazbakhsh2010}, in the chiral
limit $m_{0}\to 0$ and for vanishing magnetic field, at high
temperature and small chemical potential, the transition from the
chiral symmetry broken to the normal phase is of second order. In
contrast, at low temperatures and higher densities, the second order
phase transition goes over to a first order one. In the presence of
a uniform magnetic field, this picture remains essentially the same.
The only difference is that for $\mu=0$, the transition temperature
increases with increasing $eB$. Moreover, for $eB\neq 0$, the second
order phase transition occurs at higher temperatures and lower
densities compared to the case of vanishing magnetic fields. These
two effects of the uniform magnetic field on the $T-\mu$ phase
diagram of a two-flavor NJL model in the chiral limit are
demonstrated in Fig. \ref{fig2}(a). Both effects are manifestations
of the phenomenon of magnetic catalysis in the presence of constant
magnetic fields \cite{klimenko1992, miransky1995}. In all the plots
of Fig. \ref{fig2}, the green dashed (blue solid) lines denote first
(second) order phase transitions. To determine the first and second
order phase transitions, the method described in \cite{sato1997,
inagaki2003, fayazbakhsh2010,fayazbakhsh2010-1} is used. The first
order critical line between the chiral symmetry breaking and the
normal phase is determined by solving
\begin{eqnarray}\label{NA4}
&&\hspace{-0.8cm}\frac{\partial
\Omega_{\mbox{\tiny{eff}}}(\bar{m};T,\mu,eB)}{\partial
\bar{m}}\bigg|_{m}=0, \nonumber\\
&&\hspace{-0.8cm}\Omega_{\mbox{\tiny{eff}}}(m\neq
0;T,\mu,eB)=\Omega_{\mbox{\tiny{eff}}}(m=0;T,\mu,eB),
\end{eqnarray}
simultaneously.\footnote{In the chiral limit $m_{0}\to 0$, $m\equiv
\sigma_{0}$.} The second order critical line between these two
phases is determined using
\begin{eqnarray}\label{NA5}
\lim_{m^{2}\rightarrow 0}\frac{\partial
\Omega_{\mbox{\tiny{eff}}}(m; T,\mu,eB)}{\partial m^{2}}=0.
\end{eqnarray}
To make sure that after the second order phase transition the global
minima of the effective potential are shifted to $m=0$ in
(\ref{NA5}), and in order to avoid instabilities, an analysis
similar to \cite{berges1998} is also performed.
\par
In Fig. \ref{fig2}(b), the $T-eB$ phase diagram of our model is
plotted for various $\mu=240, 280, 320, 340$ MeV. Let us notice that
for $\mu=320$ MeV [dashed-dotted lines in Fig. \ref{fig2}(b)], the
first order critical line has two branches -- the first one for
$eB<0.1$ GeV$^{2}$ and the second one for $eB>0.5$ GeV$^{2}$, at
relatively low temperature. In the intermediate region $0.1<eB<0.5$
GeV$^{2}$, the chiral symmetry breaking phase is disfavored. In
\cite{fayazbakhsh2010}, we have studied the $T-eB$ phase diagram of
a two-flavor NJL model including meson \textit{and} diquark
condensates. We have shown that in the above mentioned intermediate
regime $0.1<eB<0.5$ GeV$^{2}$ at low temperature and for $\mu=320$
MeV, the two-flavor color superconducting (2SC) phase is favored.
For $\mu>320$ MeV, the first branch appearing for $\mu=320$ MeV and
$eB<0.1$ GeV$^{2}$ disappears, and the whole region of $eB<0.6$
GeV$^{2}$ is favored by either the normal phase, when no diquarks
exist in the model, or by the 2SC superconducting phase, when the
model includes both meson and diquark condensates (see Fig. 14 of
\cite{fayazbakhsh2010}).
\par
In Fig. \ref{fig2}(c), the $\mu-eB$ phase diagram of our two-flavor
NJL model including chiral condensates $(\sigma,\vec{\pi})$ is
plotted for various $T=30,60,100,150$ MeV. At very low temperature,
$T<100$ MeV, the transition between the chiral symmetry breaking and
normal phase is of first order (green dashed lines). Whereas at
these temperatures and for $eB<0.1$ GeV$^{2}$, the critical $\mu$ is
almost constant, it decreases by increasing the strength of the
magnetic field in the regime $0.1<eB<0.4$ GeV$^{2}$. This effect,
which was first observed in
\cite{fayazbakhsh2010-1,fayazbakhsh2010}, and later also in
\cite{rebhan2011}, is called the ``inverse magnetic catalysis'',
according to which at low temperature, the addition of the magnetic
field decreases the critical chemical potential for chiral symmetry
restoration \cite{fayazbakhsh2010, rebhan2011}. However, by
increasing the magnetic field up to $eB>0.5$ GeV$^{2}$, i.e. by
entering the regime of LLL dominance, this effect is disfavored, so
that $\mu_{c}$ again increases with increasing the strength of the
magnetic field. Let us also note that a similar phenomenon of inverse
magnetic catalysis appears in Fig. \ref{fig2}(b), where for
fixed $\mu=280$ MeV, the first order critical temperature $T_{c}$ (green
dashed line between $C_{1}$ and $C_{2}$) decreases with
\textit{increasing} $eB$ in the interval $0.1<eB<0.3$ GeV$^{2}$, and then
grows again with increasing the strength of the magnetic field up to the
regime of LLL dominance, i.e. $eB>0.5$ GeV$^{2}$. The inverse
magnetic catalysis effect may be related to the well-known
de Haas--van Alphen oscillations, which occur whenever Landau levels
pass the quark Fermi level \cite{alfven}. Similar effects are also
observed in \cite{inagaki2003, fayazbakhsh2010,fayazbakhsh2010-1}.
At higher temperature $T>100$ MeV and for $eB$ smaller than a
certain critical $eB_{c}$, there is a second order phase transition
between the chiral symmetry broken and normal phases (see the blue
solid lines in Fig. \ref{fig2}(c) for $T=100, 150$ MeV, that replace
the green dashed lines for $T<100$ MeV). The critical magnetic field
$eB_{c}$, for which the second order phase transition ends and goes over into a
first order one, is larger for higher temperature
[compare $eB_{c}$ for two critical points (black bullets) $C_{1}$
and $C_{2}$ in Fig. \ref{fig2}(c)]. This demonstrates the
destructive effect of the temperature, which is partly compensated
in the regime of strong magnetic fields, $eB>0.7$ GeV$^{2}$. More
details on the interplay between three parameters $T,\mu$ and $eB$
on the formation of chiral condensates $\sigma_{0}$ in the chiral
limit $m_{0}\to 0$ are discussed in \cite{fayazbakhsh2010}.
\subsection{$(M_{\sigma}^{2},
M_{\pi^{0}}^{2})$ and $({\cal{G}}^{\mu\nu}, {\cal{F}}^{\mu\nu})$ at
$(T\neq 0, \mu=0,eB\neq 0)$}\label{sec5p2}
\par\noindent
\begin{figure*}[hbt]
\includegraphics[width=5.5cm,height=4cm]{fig3a-MsigmaMpi-T-eB0-mu0-H.eps}
\includegraphics[width=5.5cm,height=4cm]{fig3b-G00-G33-T-eB0-mu0-H.eps}
\includegraphics[width=5.5cm,height=4cm]{fig3c-F00-F33-T-eB0-mu0-H.eps}
\caption{The $T$-dependence of $(M_{\sigma}^{2},
(M_{\vec{\pi}}^{2})_{\ell\ell})$ as well as $({\cal{G}}^{00},
{\cal{G}}^{ii})$, and $(({\cal{F}}^{00})_{\ell\ell},
({\cal{F}}^{ii})_{\ell\ell})$ with $\ell,i=1,2,3$ for vanishing
magnetic field $eB$ and at $\mu=0$.}\label{fig3}
\end{figure*}
As we have described in the first part of this section, the results
presented in (\ref{ND28b}) and (\ref{ND36b}) for $M_{\sigma}^{2}$
and $M_{\pi^{0}}^{2}$, and in (\ref{ND42b}) and (\ref{ND57b}) for
${\cal{G}}^{\mu\nu}$ as well as in (\ref{ND61b}) and (\ref{ND62b})
for ${\cal{F}}^{\mu\nu}$ are given up to an integration over
$p_{3}$-momentum and a summation over Landau levels $p$. We have
performed the $p_{3}$-integration for the set of parameters
$(\Lambda,G,m_{0})$ from (\ref{NA2}) and the smooth cutoff function
(\ref{NA3}) numerically, and will present the results in what
follows. In particular, we will present the $T$-dependence of
$(M_{\sigma}^{2}, M_{\pi^{0}}^{2})$ and
$({\cal{G}}^{\mu\nu},{\cal{F}}^{\mu\nu})$ for fixed $\mu=0$ and
various $eB=0,0.03,0.2,0.3$ GeV$^{2}$.
\par
Let us start by giving the numerical values of $(M_{\sigma}^{2},
M_{\vec{\pi}}^{2})$ and $({\cal{G}}^{\mu\nu},{\cal{F}}^{\mu\nu})$ at
$T=\mu=eB=0$. According to their definitions in
(\ref{ND4b})-(\ref{ND7b}), where, for $eB=0$, $S_{Q}$ is to be
replaced by the ordinary fermion propagator
$S(z,0)=\int\frac{d^{4}p}{(2\pi)^{4}}\frac{ie^{-ip\cdot
z}}{\gamma\cdot p-m}$ at zero $(T,\mu)$, we have the following
identities and numerical values
\begin{eqnarray}\label{NA6}
M_{\sigma}^{2}&=&3.656\times 10^{-2}\mbox{GeV}^{2},\nonumber\\
M_{\pi_{\ell}}^{2}&=&1.734\times
10^{-3}\mbox{GeV}^{2},~~~\forall\ell=1,2,3,
\end{eqnarray}
as well as
\begin{eqnarray}\label{NA7}
{\cal{G}}^{00}&=&-{\cal{G}}^{ii}~~~~~\qquad \forall i=1,2,3,\nonumber\\
({\cal{F}}^{00})_{\ell\ell}&=&-({\cal{F}}^{ii})_{\ell\ell}\qquad\forall
i=1,2,3,
\end{eqnarray}
where
\begin{eqnarray}\label{NA8c}
{\cal{G}}^{00}&=&5.381\times 10^{-2},\nonumber\\
({\cal{F}}^{00})_{\ell\ell}&=&9.143\times 10^{-2},\qquad \forall
\ell=1,2,3.
\end{eqnarray}
Moreover, we have
$({\cal{F}}^{ii})_{11}=({\cal{F}}^{ii})_{22}=({\cal{F}}^{ii})_{33}$
for all $i=0,\cdots,3$.
\par
At finite temperature and vanishing $\mu$ and $eB$, although the
above relations (\ref{NA6}) and (\ref{NA7}) between different
components of $M_{\vec{\pi}}^{2}$ as well as ${\cal{G}}^{\mu\nu}$
and ${\cal{F}}^{\mu\nu}$ are still valid, i.e. we have
\begin{eqnarray}\label{NA8}
(M_{\vec{\pi}}^{2})_{11}=(M_{\vec{\pi}}^{2})_{22}=(M_{\vec{\pi}}^{2})_{33},
\end{eqnarray}
as well as
\begin{eqnarray}\label{NA9}
{\cal{G}}^{00}&=&-{\cal{G}}^{11}=-{\cal{G}}^{22}=-{\cal{G}}^{33},\nonumber\\
({\cal{F}}^{00})_{\ell\ell}&=&-
({\cal{F}}^{ii})_{\ell\ell},\qquad\forall \ell,i=1,2,3,
\end{eqnarray}
their values become temperature dependent. In Fig. \ref{fig3},
the $T$-dependence of $(M_{\sigma}^{2},
(M_{\vec{\pi}}^{2})_{\ell\ell})$ as well as $({\cal{G}}^{00},
{\cal{G}}^{ii})$, and $(({\cal{F}}^{00})_{\ell\ell},
({\cal{F}}^{ii})_{\ell\ell})$ with $\ell,i=1,2,3$ are plotted for
vanishing $eB$ and $\mu$. As it is demonstrated in Fig.
\ref{fig3}(a), $M^{2}_{\sigma}$ and $(M_{\vec{\pi}}^{2})_{\ell\ell}$
are degenerate at $T> 220$ MeV. This is because the difference
between these two functions arises from terms proportional to the
constituent quark mass $m=m_{0}+\sigma_{0}$, which, according to Fig.
\ref{fig1}(a), almost vanishes in the crossover region $T>220$ MeV.
Later, we will show that the degeneracy of $M^{2}_{\sigma}$ and
$(M_{\vec{\pi}}^{2})_{\ell\ell}$ for $T>220$ MeV leads to the
expected degeneracy of $\sigma$ and $\pi^{0}$ meson masses
$(m_{\sigma},m_{\pi^{0}})$ for vanishing $eB$ and $\mu$ in the
crossover region $T>220$ MeV \cite{buballa2012}.
\begin{figure}[hbt]
\includegraphics[width=7.7cm,height=4.5cm]{fig4a-MsigmaT-eB0-05-mu0-H.eps}
\includegraphics[width=7.7cm,height=4.5cm]{fig4b-M33sqT-eB0-05-mu0-H.eps}
\caption{The coefficient $M_{\sigma}^{2}$ (panel a) and
$M_{\pi^{0}}^{2}$ (panel b), are plotted as functions of
$T\in[0,400]$ MeV at $\mu=0$ and for $eB=0.03,0.2,0.3$ GeV$^{2}$
(red solid, blue dashed and black dotted lines, respectively)
.}\label{fig4}
\end{figure}
\par\noindent
Let us finally consider the case of $(T,eB\neq
0,\mu=0)$.\footnote{In this paper, we are interested in the effects
of magnetic fields on the meson masses and their refraction indices
at $T\neq 0$ and $\mu=0$. The results for $T\neq 0$ and $\mu\neq 0$
as well as the $eB$-dependence of these quantities will be presented
elsewhere \cite{sadooghi2012-3}.} As it turns out, the degeneracy in
$(M_{\vec{\pi}}^{2})_{\ell\ell}$ as well as ${\cal{G}}^{ii}$ and
$({\cal{F}}^{ii})_{\ell\ell}$, with $\ell,i=1,2,3$ at $(T,eB\neq
0,\mu=0)$ is broken by finite magnetic fields. In other words, for
$(T,eB\neq 0, \mu=0)$, in contrast to (\ref{NA8}), we have
\begin{eqnarray}\label{NA10}
(M_{\vec{\pi}}^{2})_{11}=(M_{\vec{\pi}}^{2})_{22}\neq
(M_{\vec{\pi}}^{2})_{33}.
\end{eqnarray}
Moreover, in contrast to (\ref{NA9})
\begin{eqnarray}\label{NA11}
{\cal{G}}^{00}=-{\cal{G}}^{33}\neq {\cal{G}}^{11}={\cal{G}}^{22}.
\end{eqnarray}
Similarly, in contrast to (\ref{NA9}), although
$({\cal{F}}^{\mu\mu})_{11}=({\cal{F}}^{\mu\mu})_{22}\neq
({\cal{F}}^{\mu\mu})_{33}$, for all $\mu=0,\cdots,3$, we have
\begin{eqnarray}\label{NA12}
({\cal{F}}^{00})_{\ell\ell}=-({\cal{F}}^{33})_{\ell\ell}\neq
({\cal{F}}^{11})_{\ell\ell}=({\cal{F}}^{22})_{\ell\ell},
\end{eqnarray}
$\forall~\ell=1,2,3$. In Fig. \ref{fig4}, the $T$-dependence of
$M_{\sigma}^{2}$ (panel a) and $M_{\pi^{0}}^{2}$ [or equivalently,
$(M_{\vec{\pi}}^{2})_{33}$] (panel b) are demonstrated at $\mu=0$
and for non-vanishing $eB=0.03, 0.2, 0.3$ GeV$^{2}$. The exact
$(T,\mu,eB)$ dependence of
$(M_{\vec{\pi}}^{2})_{11}=(M_{\vec{\pi}}^{2})_{22}$ will be used in
\cite{sadooghi2012-3}, to determine the $(T,\mu,eB)$-dependence of
charged pion masses.
\begin{figure}[hbt]
\includegraphics[width=7.7cm,height=4.5cm]{fig5a-G00-T-eB-003-05-mu-0-H.eps}
\par\hspace{0.2cm}
\includegraphics[width=8.7cm,height=5cm]{fig5b-Gperp-T-eB-003-05-mu-0-H.eps}
\caption{The coefficients ${\cal{G}}^{00}=-{\cal{G}}^{33}$ (panel
a), ${\cal{G}}^{11}={\cal{G}}^{22}$ (panel b) are plotted as
functions of $T\in[0,400]$ MeV for $\mu=0$ and $eB=0.03,0.2,0.3$
GeV$^{2}$.}\label{fig5}
\end{figure}
\par
In Fig. \ref{fig5}, the $T$-dependence of ${\cal{G}}^{00}$ and
${\cal{G}}^{33}$ (${\cal{G}}^{00}=-{\cal{G}}^{33}$) (panel a) as
well as ${\cal{G}}^{11}={\cal{G}}^{22}$ (panel b) are plotted for
$\mu=0$ and $eB=0.03, 0.2, 0.3$ GeV$^{2}$. Whereas ${\cal{G}}^{00}$
is positive, ${\cal{G}}^{11}={\cal{G}}^{22}$ and ${\cal{G}}^{33}$
are negative. Later, we will use the matrix elements of
$M_{\sigma}^{2}$ from Fig. \ref{fig4} and the coefficients
${\cal{G}}^{\mu\mu}, \mu=0,\cdots,3$ from Fig. \ref{fig5}, to
determine the $T$-dependence of $m_{\sigma}$ at $\mu=0$ and for
various $eB\neq 0$.
\begin{figure*}[hbt]
\includegraphics[width=7.7cm,height=4.5cm]{fig6a-new-H.eps}
\includegraphics[width=7.7cm,height=4.5cm]{fig6b-new-H.eps}
\caption{The diagonal elements of ${\cal{F}}^{\mu\mu},
\mu=0,\cdots,3$ matrices are plotted as functions of $T\in[0,400]$
MeV at vanishing chemical potential ($\mu=0$) and for $eB=0.03,0.2,
0.3$ GeV$^{2}$. The identities from (\ref{NA12}) for $\ell=3$,
${\cal{F}}^{11}={\cal{F}}^{22}$ and
${\cal{F}}^{00}=-{\cal{F}}^{33}$, are explicitly demonstrated in
these plots.}\label{fig6}
\end{figure*}
\par
In Fig. \ref{fig6}, the $T$-dependence of ${\cal{F}}^{\mu\mu},
\mu=0,\cdots,3$ [or equivalently, $({\cal{F}}^{\mu\mu})_{33}$]
matrices are plotted for vanishing chemical potential and $eB=0.03,
0.2,0.3$ GeV$^{2}$. In the subsequent section, we will in particular
use $M_{\pi^{0}}^{2}$, ${\cal{F}}^{00}$ and ${\cal{F}}^{33}$ to
determine the $T$-dependence of $\pi^{0}$ pole and screening masses
as well as the direction-dependent refraction indices of neutral
pion in the longitudinal and transverse directions with respect to
the direction of the external magnetic field.
\subsection{Masses and directional refraction indices of neutral mesons}\label{sec5p3}
\par\noindent
In this section, we will use the results obtained in Sec.
\ref{sec5p2} to determine the $T$-dependence of pole and screening
masses as well as the direction-dependent refraction indices of
neutral mesons, $\sigma$ and $\pi^{0}$, in a hot and dense
magnetized quark matter. In what follows, we will first define these
quantities according to the descriptions presented in Sec.
\ref{sec2} and the corresponding energy dispersion relations for
neutral and charged mesons $\sigma$ and $\pi_{\ell}, \ell=1,2,3$
mesons [see also (\ref{NN15}) and (\ref{NN15b})],
\begin{eqnarray}\label{NA13}
E_{\sigma}^{2}&=&\frac{1}{{\cal{G}}^{00}}\left({\cal{G}}^{11}p_{1}^{2}+{\cal{G}}^{22}p_{2}^{2}+{\cal{G}}^{33}p_{3}^{2}+M_{\sigma}^{2}\right),
\nonumber\\
\lefteqn{\hspace{-0.5cm}E_{\pi_{\ell}}^{2}\hspace{0.05cm}=
}\nonumber\\
&&\hspace{-1.1cm}\frac{1}{({\cal{F}}^{00})_{\ell\ell}}\big[({\cal{F}}^{11})_{\ell\ell}p_{1}^{2}+({\cal{F}}^{22})_{\ell\ell}p_{2}^{2}+
({\cal{F}}^{33})_{\ell\ell}p_{3}^{2}+
M_{\pi_{\ell}}^{2}\big].\hspace{-0.2cm}\nonumber\\
\end{eqnarray}
The pole and screening masses of $\sigma$-mesons, $m_{\sigma}$ and
$m_{\sigma}^{(i)}, i=1,2,3$, are then defined by
\begin{eqnarray}\label{NA14}
\hspace{-0.5cm}m_{\sigma}=\bigg[\frac{\mbox{Re}~M_{\sigma}^{2}}{\mbox{Re}~{\cal{G}}^{00}}\bigg]^{1/2},~~\mbox{as
well as}~~m_{\sigma}^{(i)}=\frac{m_{\sigma}}{u_{\sigma}^{(i)}},
\end{eqnarray}
where, $u_{\sigma}^{(i)}$, is the directional refraction index of
$\sigma$-mesons in the $i$-th direction,
\begin{eqnarray}\label{NA15}
u_{\sigma}^{(i)}=\bigg|\frac{\mbox{Re}~{\cal{G}}^{ii}}{\mbox{Re}~{\cal{G}}^{00}}\bigg|^{1/2},
\qquad i=1,2,3.
\end{eqnarray}
The $(T,\mu,eB)$-dependence of $m_{\sigma}, m_{\sigma}^{(i)}$ and
$u_{\sigma}^{(i)}$ are given by plugging $M_{\sigma}^{2}$ and
${\cal{G}}^{\mu\mu}, \mu=0,\cdots,3$ from (\ref{ND28b}),
(\ref{ND42b}) and (\ref{ND57b}) in the above relations. As concerns
the pions, we choose the basis $(\pi^{\pm},\pi^{0})$ instead of the
real basis $\vec{\pi}=(\pi_{1},\pi_{2},\pi_{3})$. Here,
$\pi^{\pm}\equiv (\pi_{1}\pm i\pi_{2})/\sqrt{2}$ and $\pi^{0}\equiv
\pi_{3}$. Using this new imaginary basis, the energy dispersion
relations $E_{\pi_{\ell}}$ from (\ref{NA13}) for
$(\pi^{\pm},\pi^{0})$ turn out to be
\begin{eqnarray}\label{NA16}
\lefteqn{E_{\pi^{\pm}}^{2}\equiv
\frac{eB(2\ell+1)}{[({\cal{F}}^{00})_{11}\mp
i({\cal{F}}^{00})_{12}]}
}\nonumber\\
&&+\frac{[({\cal{F}}^{33})_{11}\mp
i({\cal{F}}^{33})_{12}]}{[({\cal{F}}^{00})_{11}\mp
i({\cal{F}}^{00})_{12}]}p_{3}^{2}+\frac{[(M_{\vec{\pi}}^{2})_{11}\mp
i(M_{\vec{\pi}}^{2})_{12}]}{[({\cal{F}}^{00})_{11}\mp
i({\cal{F}}^{00})_{12}]},\nonumber\\
\lefteqn{E_{\pi^{0}}^{2}\equiv
\frac{({\cal{F}}^{11})_{33}}{({\cal{F}}^{00})_{33}}~p_{1}^{2}
}\nonumber\\
&&+ \frac{({\cal{F}}^{22})_{33}}{({\cal{F}}^{00})_{33}}~p_{2}^{2}+
\frac{({\cal{F}}^{33})_{33}}{({\cal{F}}^{00})_{33}}~p_{3}^{2}+
\frac{(M_{\vec{\pi}}^{2})_{33}}{({\cal{F}}^{00})_{33}}.
\end{eqnarray}
Note that since $\pi^{\pm}$ are charged pseudoscalar particles,
their energy dispersion relations in the presence of constant
magnetic fields have discrete contributions. According to our
results in \cite{jafarisalim2006}, the energy levels are labeled by
$\ell$, in the form given in the first term in (\ref{NA16}). The
above dispersion relations for charged pions are comparable with the
dispersion relations presented recently in \cite{anderson2012-2}
(see Eq. (2.10) in \cite{anderson2012-2}). According to the
formalism presented originally in \cite{miransky1995} and
generalized to a multi-flavor system in the present paper, in
contrast to the relations presented in \cite{anderson2012-2} for
charged pions, the nontrivial form factors
$({\cal{F}}^{\mu\mu})_{\ell m}$, $\forall~ \ell,m\neq 3$ in
(\ref{NA16}), consider the effect of external magnetic fields on
charged quarks produced at the early stage of the heavy-ion
collisions. Moreover, in the formalism presented in
\cite{anderson2012-2}, in contrast to the dispersion relations
presented in (\ref{NA16}), the energy dispersion relation of neutral
pion is unaffected by the external magnetic field. Using
(\ref{NA16}), and in analogy to (\ref{NA14}), the pion pole masses
are defined by
\begin{eqnarray}\label{NA17}
m_{\pi^{\pm}}&=&\bigg[\frac{\mbox{Re}~[(M_{\vec{\pi}}^{2})_{11}\mp
i(M_{\vec{\pi}}^{2})_{12}]}{\mbox{Re}~[({\cal{F}}^{00})_{11}\mp
i({\cal{F}}^{00})_{12}]}\bigg]^{1/2},\nonumber\\
m_{\pi^{0}}&=&\bigg[\frac{\mbox{Re}~(M_{\pi}^{2})_{33}}{\mbox{Re}~({\cal{F}}^{00})_{33}}\bigg]^{1/2}.
\end{eqnarray}
In particular, the screening mass and the refraction index of
neutral pions in the $i$-th direction are given by
\begin{eqnarray}\label{NA18}
\hspace{-0.5cm}m_{\pi^{0}}^{(i)}=\frac{m_{\pi^{0}}}{u_{\pi^{0}}^{(i)}},~~~\mbox{and}~~~
u_{\pi^{0}}^{(i)}=\bigg|\frac{\mbox{Re}~({\cal{F}}^{ii})_{33}}{\mbox{Re}~({\cal{F}}^{00})_{33}}\bigg|^{1/2},
\end{eqnarray}
respectively. In this paper, we will focus on the $T$-dependence of
the mass and refraction index of neutral pions at fixed $\mu$ and
finite $eB$. The study of the effect of constant magnetic fields on
charged pion masses and refraction indices will be postponed to a
future publication \cite{sadooghi2012-3}.
\par
Let us start with the case $T=\mu=eB=0$. Using the numerical results
from (\ref{NA6}) and (\ref{NA7}), in this case, the $\sigma$-meson
mass and refraction index are given by
\begin{eqnarray}\label{NA19}
m_{\sigma}\simeq 824.3~\mbox{MeV},\qquad\mbox{and}\qquad
u_{\sigma}^{(i)}=1,
\end{eqnarray}
and therefore
\begin{eqnarray}\label{NA21b}
m_{\sigma}^{(i)}=m_{\sigma},~\forall i=1,2,3.
\end{eqnarray}
Similarly, the $\vec{\pi}$-meson mass and refraction index at
$T=\mu=eB=0$ read
\begin{eqnarray}\label{NA20}
m_{\pi_{\ell}}\simeq 137.7~\mbox{MeV},\qquad\mbox{and}\qquad
u_{\pi_{\ell}}^{(i)}=1,
\end{eqnarray}
$\forall~\ell,i=1,2,3$, and therefore
\begin{eqnarray}\label{NA22b}
m_{\pi_{\ell}}^{(i)}=m_{\pi_{\ell}},~\forall \ell,i=1,2,3.
\end{eqnarray}
At $(T\neq 0,\mu=eB=0)$, $m_{\sigma}$ is given by (\ref{NA14}).
Similarly, according to (\ref{NA13}), the pion masses
$m_{\vec{\pi}}$ are defined by
\begin{eqnarray}\label{NA21}
m_{\pi_{\ell}}=\bigg[\frac{\mbox{Re}~(M_{\vec{\pi}}^{2})_{\ell\ell}}{\mbox{Re}~
({\cal{F}}^{00})_{\ell\ell}}\bigg]^{1/2},\qquad \forall\ell=1,2,3.
\end{eqnarray}
\begin{figure}[hbt]
\includegraphics[width=7.7cm,height=4.5cm]{fig7a-sigma-pion0-mass-T-eB0-mu0-H.eps}
\includegraphics[width=7.7cm,height=4.5cm]{fig7b-sigma-pion0-mass-T-eB0-mu0-H.eps}
\caption{(a) The $T$-dependence of $\sigma$ and $\vec{\pi}$ mesons
masses is demonstrated at $\mu=0$ and for vanishing magnetic field
(solid line for $m_{\sigma}$ and dashed line for $m_{\vec{\pi}}$).
Comparing these curves with the $T$-dependence of the constituent
quark mass $m=m_{0}+\sigma_{0}$ (dotted line), shows that a mass
degeneracy between $m_{\sigma}$ and $m_{\vec{\pi}}$ occurs in the
crossover region at $T>220$ MeV. (b) The $T$-dependence of the pole
masses of neutral mesons is plotted for the case when
$F_{2}^{\mu\nu}$ in (\ref{NN10}) vanishes. The numerical results for
$m_{\sigma}$ and $m_{\vec{\pi}}$ are in good agreement with the
results recently presented in \cite{buballa2012}.}\label{fig7}
\end{figure}
\begin{figure}[hbt]
\includegraphics[width=7.7cm,height=4.5cm]{fig8a-msigma-T-mu0-120-B0-H.eps}
\includegraphics[width=7.7cm,height=4.5cm]{fig8b-mpi-T-mu0-120-B0-H.eps}
\caption{The $T$-dependence of $m_{\sigma}$ (panel a) and
$m_{\vec{\pi}}$ (panel b) is plotted for $eB=0$ and at $\mu=0,120$
MeV. As in the case of $\mu=0$, at $\mu=120$ MeV, the neutral meson
masses are still degenerate in the crossover region $T>220$
MeV.}\label{fig8}
\end{figure}
\par\noindent Because of the identity (\ref{NA8}), which is still valid at
$(T\neq 0,\mu=eB=0)$, the masses of $\vec{\pi}=(\pi_{1}, \pi_{2},
\pi_{3})$ are degenerate, as in $T=0$ case [see (\ref{NA20})]. In
Fig. \ref{fig7}(a), the $T$-dependence of $m_{\sigma}$ and
$m_{\vec{\pi}}$ is plotted for $\mu=eB=0$ (black solid line for
$m_{\sigma}$ and red dashed line for $m_{\vec{\pi}}$). Here, the
$T$-dependence of the coefficients ${\cal{G}}^{\mu\mu}$ and
${\cal{F}}^{\mu\mu}$ from Fig. \ref{fig3} is used. We have also
plotted the $T$-dependence of the constituent mass
$m=m_{0}+\sigma_{0}$ in Fig. \ref{fig7}(a) (dotted line). Comparing
these curves, it turns out that, as expected, the mass degeneracy of
$\sigma$ and $\vec{\pi}$ meson masses occurs in the crossover region
$T>220$ MeV. To compare the result presented in Fig. \ref{fig7}(a),
with the recent results for $m_{\vec{\pi}}$ and $m_\sigma$,
presented e.g. in \cite{buballa2012}, we have set $F_{2}^{\mu\nu}=0$
in (\ref{NN10}), and determined the pole masses of neutral mesons
and the chiral condensate using the same method as presented in this
paper. The numerical results for neutral meson masses and chiral
condensate for $T\neq 0$, $\mu=eB=0$ and vanishing $F_{2}^{\mu\nu}$
are plotted in Fig. \ref{fig7}(b). As it turns out, only
$m_{\sigma}$ changes relative to the case where $F_{2}^{\mu\nu}\neq
0$ [see Fig. \ref{fig7}(b)]. The numerical results are in good
agreement with the results presented in \cite{buballa2012}.
\par
As concerns the screening mass and refraction index of $\vec{\pi}$
mesons at $(T\neq 0, \mu=eB=0)$, we use the results of Fig.
\ref{fig3}, and in analogy to the definitions (\ref{NA14}) and
(\ref{NA15}) define the screening mass and refraction index of
$\vec{\pi}$ mesons by
\begin{eqnarray}\label{NA22}
m_{\pi_{\ell}}^{(i)}&=&\frac{m_{\pi_{\ell}}}{u_{\pi_{\ell}}^{(i)}},\qquad\mbox{where}\nonumber\\
u_{\pi_{\ell}}^{(i)}&=&\bigg|\frac{\mbox{Re}~({\cal{F}}^{ii})_{\ell\ell}}{\mbox{Re}~({\cal{F}}^{00})_{\ell\ell}}\bigg|^{1/2},
~~\forall \ell,i=1,2,3.
\end{eqnarray}
Using the definitions (\ref{NA15}) and (\ref{NA22}), and the
numerical results of ${\cal{G}}^{00}$ as well as
$({\cal{F}}^{00})_{\ell\ell}$ from Fig. \ref{fig3} at $(T\neq
0,\mu=eB=0)$, the $T$-dependence of the screening mass and
refraction index of $(\sigma,\vec{\pi})$ mesons can be determined
for all directions $i=1,2,3$. As it turns out, as in $T=0$ case, we
have
\begin{eqnarray}\label{NA23}
\begin{array}{rclcrcl}
u_{\sigma}^{(i)}&=&1,&\qquad&\forall i&=&1,2,3,\\
u_{\pi_{\ell}}^{(i)}&=&1,&\qquad&\forall \ell,i&=&1,2,3,
\end{array}
\end{eqnarray}
and therefore
\begin{eqnarray}\label{NA27b}
\begin{array}{rclcrcl}
m_{\sigma}^{(i)}&=&m_{\sigma},&\qquad&\forall i&=&1,2,3,\\
m_{\pi_{\ell}}^{(i)}&=&m_{\pi_{\ell}},&\qquad&\forall
\ell,i&=&1,2,3,
\end{array}
\end{eqnarray}
for the whole interval $T\in[0,400]$ MeV. These results are compatible
with the identities (\ref{NA9}). The fact that $u_{\pi_{\ell}}=1$
seems to be in contradiction with the results from \cite{son2000,
ayala2002}, where it is shown that at finite temperature, because of
different pion decay constants in the spatial and temporal
directions, $f_{s}$ and $f_{t}$, the
refraction index $u=\frac{f_{s}}{f_{t}}$ appearing in the energy
dispersion relation $\omega^{2}=u^{2}(\mathbf{p}^{2}+m^{2})$ is
smaller than one. Note, however, that in \cite{son2000, ayala2002},
the pions are self-interacting and $f_{s}$ and $f_{t}$ receive
$T$-dependent contributions from one-loop pion-self energy diagram,
that includes a $(\vec{\pi}^{2})^{2}$ vertex. In contrast, the pions
considered in the present paper are free.
\begin{figure*}[hbt]
\includegraphics[width=5.5cm,height=4cm]{fig9a-sigma-pion0-mass-T-eB003-mu0-H.eps}
\includegraphics[width=5.5cm,height=4cm]{fig9b-sigma-pion0-mass-T-eB02-mu0-H.eps}
\includegraphics[width=5.5cm,height=4cm]{fig9c-sigma-pion0-mass-T-eB05-mu0-H.eps}
\caption{The $T$-dependence of $m_{\sigma}$ and $m_{\pi^{0}}$ is
plotted for $eB=0.03,0.2,0.3$ GeV$^{2}$ and at $\mu=0$ MeV.
}\label{fig9}
\end{figure*}
\par
Let us also notice that the above results are still valid at
non-vanishing $\mu$ and for vanishing $eB$. In Fig. \ref{fig8}, we
have compared $m_{\sigma}$ and $m_{\pi_{\ell}}, \ell=1,2,3$ at
$\mu=0$ with their values at $\mu=120$ MeV for vanishing $eB$. Small
deviations from their values at $\mu=0$ appear for $m_{\sigma}$
(panel a). For $m_{\pi_{\ell}}, \ell=1,2,3$ (panel b) the difference
between $m_{\pi_{\ell}}$ at $\mu=0$ and $\mu=120$ MeV becomes larger
with increasing temperature. As it turns out, at non-vanishing
chemical potential, the degeneracy of the pion masses
$m_{\pi_{\ell}}$ is still valid for all $\ell=1,2,3$. Moreover,
$m_{\sigma}$ and $m_{\vec{\pi}}$ are also degenerate in the
crossover region $T>220$ MeV for $\mu=120$ MeV, as in the $\mu=0$
case.
\par
At finite $T$ and for non-vanishing magnetic fields, the pion masses
are not degenerate, i.e. we have $m_{\pi^{+}}\neq m_{\pi^{-}}\neq
m_{\pi^{0}}$ [see (\ref{NA10}) and (\ref{NA11}) and the definitions
of $m_{\pi^{\pm}}$ and $m_{\pi^{0}}$ from (\ref{NA17})]. In this
paper, we will focus on the $T$-dependence of $m_{\sigma}$ and
$m_{\pi^{0}}$. In Figs. \ref{fig9}(a)-\ref{fig9}(c), the
$T$-dependence of $(m_{\sigma},m_{\pi^{0}})$ masses are plotted for
$eB=0.03,0.2,0.3$ GeV$^{2}$ and at $\mu=0$. The expected degeneracy
of $m_{\sigma}$ and $m_{\pi^{0}}$ mesons in the crossover region can
be observed in all the plots of Fig. \ref{fig9}. However, as it
turns out, the overlap interval depends on $eB$ for fixed $\mu$.
Denoting the minimum temperature for which the overlap interval
starts with $T_{o}$, then for $eB=0.03$ GeV$^{2}$ we have
$T_{o}\simeq 210$ MeV, whereas for $eB=0.2,0.3$ GeV$^{2}$, $T_{o}$
are given by $T_{o}\simeq 220$ MeV and $T_{o}\simeq 240$ MeV,
respectively.
\begin{figure}[hbt]
\includegraphics[width=7.7cm,height=4.5cm]{fig10a-sigma-mass-T-mu0-eB003-05-H.eps}
\includegraphics[width=7.7cm,height=4.5cm]{fig10b-pi-mass-T-mu0-eB003-05-H.eps}
\caption{The $T$-dependence of $m_{\sigma}$ (panel a) and
$m_{\pi^{0}}$ (panel b) is plotted for $eB=0.03,0.2,0.3$ GeV$^{2}$
at $\mu=120$ MeV.}\label{fig10}
\end{figure}
\par
In Fig. \ref{fig10}, we have compared the $T$-dependence of the
masses of $\sigma$ and $\pi^{0}$ mesons for fixed $\mu=120$ MeV and
various $eB=0.03,0.2,0.3$ GeV$^{2}$. As it turns out, at temperature
below (above) the crossover region, the $\sigma$-meson masses
increase (decrease) with increasing the magnetic field strength.
This qualitative behavior of the $T$-dependence of $m_{\sigma}$ for
various $eB\neq 0$ is comparable with the results presented in
\cite{skokov2011} (see Fig. 3 in \cite{skokov2011}). The difference
arises from the fact that, in contrast to the present paper, the
quantum fluctuations of $\sigma$-mesons are considered in
\cite{skokov2011}. Moreover, in contrast to the present paper, the
contribution of $F_{2}^{\mu\nu}$ appearing in (\ref{NN10}) is not
considered in \cite{skokov2011}.
\begin{figure}[hbt]
\includegraphics[width=7.7cm,height=4.5cm]{fig11a-velosigma-T-mu0-H-C.eps}
\includegraphics[width=7.7cm,height=4.5cm]{fig11b-velopi0-T-mu0-H-C.eps}
\caption{The $T$-dependence of the transverse and longitudinal
refraction indices of $\sigma$ (panel a) and $\pi^{0}$ mesons (panel
b) is plotted for various $eB$. The longitudinal refraction index of
neutral mesons is equal to unity and independent of $T$ (red dashed
lines). The transverse refraction index of neutral mesons decreases
with increasing the strength of $eB$. }\label{fig11}
\end{figure}
\begin{figure*}[hbt]
\includegraphics[width=5.5cm,height=4cm]{fig12a-screeningsigma-mass-T-eB003-mu0-H.eps}
\includegraphics[width=5.5cm,height=4cm]{fig12b-screeningsigma-mass-T-eB02-mu0-H.eps}
\includegraphics[width=5.5cm,height=4cm]{fig12c-screeningsigma-mass-T-eB05-mu0-H.eps}
\caption{The $T$-dependence of the screening mass of $\sigma$
mesons, $m_{\sigma}^{(i)}$, from (\ref{NA14}) in the transverse
$(i=1,2)$ and longitudinal $(i=3)$ directions is plotted for various
$eB$. As it turns out, the screening mass of the $\sigma$-meson in
the longitudinal direction $m_{\sigma}^{(i)}, i=3$ is the same as
its pole mass $m_{\sigma}$ (see $m_{\sigma}$ in Fig. \ref{fig9}).
}\label{fig12}
\end{figure*}
\begin{figure*}[hbt]
\includegraphics[width=5.5cm,height=4cm]{fig13a-screeningpi0-mass-T-eB003-mu0-H.eps}
\includegraphics[width=5.5cm,height=4cm]{fig13b-screeningpi0-mass-T-eB02-mu0-H.eps}
\includegraphics[width=5.5cm,height=4cm]{fig13c-screeningpi0-mass-T-eB05-mu0-H.eps}
\caption{The $T$-dependence of the screening mass of $\pi^{0}$
mesons, $m_{\pi^{0}}^{(i)}$, from (\ref{NA18}) in the transverse
$(i=1,2)$ and longitudinal $(i=3)$ directions is plotted for various
$eB$. As it turns out, the screening mass of the $\pi^{0}$-meson in
the longitudinal direction $m_{\pi^{0}}^{(i)}, i=3$ is the same as
its pole mass $m_{\pi^{0}}$ (see $m_{\pi^{0}}$ in Fig.
\ref{fig9}).}\label{fig13}
\end{figure*}
\begin{figure}[hbt]
\includegraphics[width=7.7cm,height=5cm]{fig14a-trans-sigma-H.eps}
\includegraphics[width=7.7cm,height=5cm]{fig14b-trans-pi0-H.eps}
\caption{The $T$-dependence of the screening mass of $\sigma$ (panel
a) and $\pi^{0}$ (panel b) mesons in the transverse direction is
plotted for $eB=0.03,0.2,0.3$ GeV$^{2}$ at $\mu=0$
MeV.}\label{fig14}
\end{figure}
\begin{figure}[hbt]
\includegraphics[width=7.7cm,height=5cm]{fig15-instability-H.eps}
\caption{The $T$-dependence of the squared mass of neutral pion is
plotted for $eB=0.3, 0.5, 0.7$ GeV$^{2}$.}\label{fig15}
\end{figure}
\par
Using the definitions of the directional refraction index of neutral
mesons, $u_{\sigma}^{(i)}$ and $u_{\pi^{0}}^{(i)}$, from
(\ref{NA15}) and (\ref{NA18}), as well as the $T$-dependence of
${\cal{G}}^{\mu\mu}, \mu=0,\cdots,3$ and ${\cal{F}}^{\mu\mu},
\mu=0,\cdots,3$ from Figs. \ref{fig5} and \ref{fig6}, the
$T$-dependence of the transverse ($i=1,2$) and longitudinal ($i=3$)
refraction indices of $\sigma$ and ${\pi}^{0}$ mesons, are plotted
in Fig. \ref{fig11}. From ${\cal{G}}^{00}=-{\cal{G}}^{33}$ and
$({\cal{F}}^{00})_{33}=-({\cal{F}}^{33})_{33}$ in (\ref{NA11}) and
(\ref{NA12}), the refraction index of neutral mesons in the
longitudinal direction turns out to be equal to unity, independent of
$T$ and $\mu$ (see the horizontal red dashed line in Fig.
\ref{fig11}). In contrast, the relations ${\cal{G}}^{11}\neq
{\cal{G}}^{00}$ as well as $({\cal{F}}^{11})_{33}\neq
({\cal{F}}^{00})_{33}$ from (\ref{NA11}) as well as (\ref{NA12}),
lead to $u_{\sigma}^{(i)}\neq 1$ as well as $u_{\pi^{0}}^{(i)}\neq
1$ for $i=1,2$. In Fig. \ref{fig11}, the $T$-dependence of the
transverse and longitudinal refraction indices of \textit{free} and
\textit{neutral} mesons are plotted for $eB=0.03, 0.2, 0.3$
GeV$^{2}$ and $\mu=0$ MeV. As it turns out, in the presence of
constant magnetic fields, the transverse refraction indices of
neutral mesons are always larger than unity. Moreover, the
transverse refraction index of $\sigma$ ($\pi^{0}$) meson decreases
(increases) with increasing temperature. Note that transverse
refraction indices of neutral mesons decrease with increasing the
strength of the background magnetic fields. It is interesting to
add the effect of meson fluctuations to the above results and
recalculate the $T$-dependence of longitudinal and transverse
refraction indices of neutral mesons for non-vanishing $eB$ and
$\mu$.
\par
Using the definition of the screening masses $m_{\sigma}^{(i)}$ from
(\ref{NA14}) and $m_{\pi^{0}}^{(i)}$ from (\ref{NA18}), we arrive at
the $T$-dependence of the screening masses of neutral mesons at
$\mu=0$ and for fixed $eB=0.03,0.2,0.3$ GeV$^{2}$. Since in the
longitudinal direction ($i=3$), the directional refraction index of
$\sigma$ and $\pi^{0}$ mesons is equal to unity, the screening
masses of the neutral mesons in this direction are the same as their
pole masses $m_{\sigma}$ and $m_{\pi^{0}}$. In Figs. \ref{fig12} and
\ref{fig13}, the $T$-dependence of the screening masses of $\sigma$
and $\pi^{0}$ mesons in the transverse ($i=1,2$) and longitudinal
($i=3$) directions with respect to the direction of the external
magnetic field are demonstrated. As it turns out, the screening mass
of $\sigma$ and $\pi^{0}$ mesons in the transverse directions are
for all fixed $eB$ always smaller than the screening masses in the
longitudinal direction. In Figs. \ref{fig14}(a) and (b), we have
compared the screening masses of $\sigma$ and $\pi^{0}$ mesons in
the transverse directions ($i=1,2$), respectively. Comparing with
the plots of Figs. \ref{fig10}(a) and (b), it turns out, that, in
contrast to $m_{\pi^{0}}^{(i)}, i=1,2$, the behavior of
$m_{\sigma}^{(i)}, i=1,2,$ by increasing the strength of the
magnetic field is different from that of $m_{\sigma}$. And, whereas
$m_{\sigma}^{(i)}, i=1,2$, increases, in general with $eB$,
$m_{\pi^{0}}^{(i)}, i=1,2$, decreases with increasing the strength
of the background magnetic field.
\par
At this stage a remark concerning the effects of stronger magnetic
fields, $eB>0.4$ GeV$^{2}$, is in order. In Fig. \ref{fig15}, the
squared mass of neutral pion, $m_{\pi^{0}}^{2}$, is plotted for
$eB=0.3,0.5,0.7$ GeV$^{2}$ (or equivalently $eB\sim 16~m_{\pi}^{2}$,
$26~m_{\pi}^{2}$ and $37~m_{\pi}^{2}$ with $m_{\pi}=138$ MeV). As it turns out, for
$eB=0.5$ GeV$^{2}$ ($eB=0.7$ GeV$^{2}$), in the regime of $T<250$
MeV ($T<320$ MeV), the squared mass of neutral pion is negative.
Thus, the pions are tachyonic. As we have mentioned before, for
$eB>0.5$ GeV$^{2}$, only lower Landau levels contribute to
$m_{\pi^{0}}^{2}$ [see Footnote 10]. The fact that in the regime of
LLL dominance and at relatively low temperature tachyonic modes
appear, is consistent with the recent results presented in
\cite{gorbar2012}. There, it is shown that at sufficiently low
temperature and in the LLL approximation tachyonic instabilities
appear in the NJL model in $2+1$ dimensions. The tachyonic
instabilities appearing in $m_{\pi^{0}}^{2}$ from Fig. \ref{fig15}
are another example of the appearance of these instabilities at low
temperature and strong magnetic field in $3+1$ dimensional NJL
model.
\section{Summary and conclusions}\label{sec6}
\par\noindent
In this paper, we studied the effects of uniform magnetic fields on
the properties of free neutral mesons, $\sigma$ and $\pi^{0}$, in a
hot and dense quark matter. The aim was, in particular, to explore
possible effects of a background (constant) magnetic field on the
temperature dependence of the pole and screening masses as well as
the directional refraction indices of these mesons. To do this,
first, using an appropriate derivative expansion up to second order,
the one-loop effective action of a two-flavor NJL model at finite
$(T,\mu,eB)$ including $\sigma$ and $\vec{\pi}$ mesons is
determined. Then, using the formalism, presented in Sec. \ref{sec2},
the masses and refraction indices of these composite fields are
computed from their energy dispersion relations.
\par
As it turns out, the one-loop effective action of this model
consists of two parts, the effective kinetic part, including
non-trivial form factors, and the effective potential part, from
which we explored in Sec. \ref{sec5p1}, the complete phase portrait
of the model in $T-\mu$, $T-eB$ and $\mu-eB$ planes for various
fixed $eB, \mu$ and $T$, respectively. Here, we have mainly reviewed
the results previously presented in \cite{fayazbakhsh2010} for a
two-flavor NJL model including mesons and diquarks. We have shown
that the magnetic catalysis of dynamical chiral symmetry breaking
affects the phase portrait of this model in two different ways: i)
The type of the chiral phase transition changes from second to first
order in the presence of constant magnetic fields, and ii) the
transition temperatures and chemical potentials from the chiral
symmetry broken to chirally symmetric phase increase, in general,
with increasing the strength of the external magnetic fields. Only
at low temperatures $T<50$ MeV and high chemical potentials
$280<\mu<340$ MeV and for weak magnetic fields $eB<0.2$ GeV$^{2}$,
the transition temperature decreases with increasing the strength of
$eB$. This is related to the phenomenon of inverse magnetic
catalysis, discussed in \cite{fayazbakhsh2010, rebhan2011}.
\par
In the rest of the paper, we mainly focused on the kinetic part of
the one-loop effective action. Using the formalism originally
presented in \cite{miransky1995} for a single flavor NJL model, and
generalizing it to a multi-flavor system, in Sec. \ref{sec2}, we
have determined, in Sec. \ref{sec4}, the nontrivial form factors and
squared mass matrices corresponding to neutral mesons at finite
$(T,\mu,eB)$, up to an integration over $p_{3}$-momentum and a
summation over Landau levels. They are then performed numerically in
Sec. \ref{sec5}, where, in particular, the $T$-dependence of the
form factors and squared mass matrices of the neutral mesons are
presented for several fixed magnetic fields and zero chemical
potential. Using these quantities, we have eventually determined,
the $T$-dependence of the pole and screening masses as well as the
directional refraction index of $\sigma$ and $\pi^{0}$ mesons for
fixed magnetic fields and at vanishing as well as finite chemical
potential.
\par
Because of the assumed isospin symmetry, implying $m_{u}=m_{d}$,
charged and neutral meson masses are expected to be degenerate for
vanishing magnetic fields and at zero temperature and chemical
potential. However, as it turns out, this degeneracy breaks down in
the presence of constant magnetic fields, so that we have
$m_{\pi^{0}}\neq m_{\pi^{+}}\neq m_{\pi^{-}}$, even at zero $(T,
\mu)$. This effect is mainly because of the dimensional reduction
from $D$ to $D-2$ dimensions in the presence of constant magnetic
fields, which affects the dynamics of a fermionic system in the
longitudinal and transverse directions with respect to the direction
of the external magnetic field. As a consequence, directional
anisotropy in various quantities corresponding to the particles in
the presence of a uniform magnetic field is implied.
\par
In the present paper, we have only studied the $T$-dependence of the
masses of \textit{neutral} mesons for fixed magnetic fields and
chemical potentials. The $T$-dependence of \textit{charged} meson
masses at finite $eB$ and $\mu$, will be presented elsewhere
\cite{sadooghi2012-3}. As concerns the $\sigma$-meson mass,
$m_{\sigma}$, the expected mass degeneracy with the mass of neutral
pions, $m_{\pi^{0}}$, in the crossover region, $T>220$ MeV, is
observed for various fixed $eB$ and $\mu$. Moreover, as it turns
out, $m_{\pi^{0}}$ decreases with increasing the strength of the
magnetic field. In contrast, $m_{\sigma}$ increases with increasing
$eB$ only at low temperature $T<220$ MeV, while it decreases with
increasing $eB$ in the crossover region, $T>220$ MeV. This
qualitative behavior is consistent with the result previously
presented in \cite{skokov2011} in the framework of a
Polyakov-Quark-Meson model in $3+1$ dimensions.
\par
As concerns the refraction indices of neutral mesons, it turns out
that in the presence of constant magnetic fields, the longitudinal
and transverse refraction indices with respect to the direction of
the external magnetic field are different. Moreover, whereas the
longitudinal refraction index of neutral mesons is equal to unity,
their transverse refraction index is larger than unity. The observed
anisotropy in the refraction indices of neutral mesons is because of
the explicit breaking of Lorentz symmetry in the presence of
constant and uniform magnetic fields. The anisotropy observed in the
directional refraction index of neutral mesons is also reflected in
their screening masses, which are different in the longitudinal and
transverse directions with respect to the direction of $eB$.
According to their definitions, and because of the above mentioned
results for directional refraction indices in finite $eB$, the
screening masses of the neutral mesons in the longitudinal direction
are the same as their pole masses, while in the transverse
direction, independent of $T$ and $\mu$, their screening masses are
always smaller than their pole masses. They increase with increasing
temperature at a fixed $eB$ and $\mu$. Moreover, whereas the
screening mass of $\sigma$ in the transverse direction increases in
general with the strength of the background magnetic field, the
screening mass of $\pi^{0}$, in the same direction, decreases with
$eB$.
\par
It is worth noting that the results obtained in this paper, showing
qualitatively the effect of strong magnetic fields on the properties
of neutral mesons in a hot and magnetized quark matter, can, apart
from the physics of magnetars, be also relevant for the physics of
heavy ion collisions at RHIC and LHC. As it is known from
\cite{mclerran2007, skokov2010}, magnetic fields are supposed to be
produced in the early stage of non-central heavy-ion collisions,
and, depending on the initial conditions, e.g. the energies of
colliding nucleons and the corresponding impact parameters, they are
estimated to be in the order $eB\sim 1.5~m_{\pi}^{2}$ ($eB\sim 0.03$
GeV$^{2}$) at RHIC and $eB\sim 15~m_{\pi}^{2}$ ($eB\sim 0.3$
GeV$^{2}$) at LHC energies. Although the created magnetic field is
extremely short-living and decays very fast, it can affect the
properties of charged quarks produced in the earliest stage of
heavy-ion collisions. The way we have introduced the magnetic fields
in, e.g., (\ref{ND1b}), where the external magnetic field interacts
essentially with charged quarks, opens the possibility to describe
qualitatively the effect of external magnetic fields on
\textit{neutral} mesons built from these magnetized and charged
quarks. Note that neutral mesons, by themselves, have, because of
the lack of electric charge, no interaction with the external
magnetic fields. Thus, the method used in the present paper, is in
contrast to the method used in \cite{andersen2011-pions,
anderson2012-2}, where the external magnetic field interacts only
with charged pions appearing in a magnetized chiral perturbative
Lagrangian.
\par
Being motivated by these facts, we mainly focused, in this paper, on
the effects of weak and intermediate magnetic fields, $eB\leq 0.3$
GeV$^{2}$. In Fig. \ref{fig15}, however, we have plotted the squared
mass of neutral pion as a function of temperature for $eB=0.5, 0.7$
GeV$^{2}$. Here, we have shown that at low temperature and for
strong magnetic fields, where LLL approximation is reliable,
$m_{\pi^{0}}^{2}$ becomes negative. The appearance of these kind of
tachyonic instabilities at low temperature and in the presence of
strong magnetic fields is recently observed in \cite{gorbar2012} in
the framework of an NJL model in $2+1$ dimensions, which has
application in condensed matter physics. Our results are consistent
with the main conclusions presented in \cite{gorbar2012}.
\par
Let us also notice that the model used in the present paper can be
extended in many ways, e.g. by improving the method leading to the
kinetic coefficients and mass matrices of the mesons using
functional renormalization group (RG) method, which is recently used
in \cite{skokov2011, pawlowski2012, andersen2012}.
\section{Acknowledgments}
The authors thank F. Ardalan and H. Arfaei for valuable discussions.
N. S. is grateful to R. D. Pisarski for useful comments on pion
velocity. S. S. thanks the supports of the Physics Department of
SUT, where the analytical computation of the kinetic coefficients is
performed in the framework of her master thesis. N. S. thanks the
hospitality of the Institute for Theoretical Physics of the Goethe
University of Frankfurt, Germany, where the final stage of this work
is performed. Her visit is supported by the Helmholtz International
Center for FAIR within the framework of the LOEWE program launched
by the state of Hesse.
\begin{appendix}
\section*{Appendix: Dimensional Regularization of (\ref{NE13b})}\label{appA}
\setcounter{section}{1} \setcounter{equation}{0} \par\noindent
In this appendix, we will use an appropriate dimensional
regularization to regularize the $(T,\mu)$-independent part of the
effective potential
\begin{eqnarray}\label{appB1}
\lefteqn{\hspace{-0.5cm}\Omega_{\mbox{\tiny{eff}}}^{(1)}(m;eB,T=\mu=0)}\nonumber\\
&&\hspace{-0.5cm}\equiv
-3\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|\sum^{\infty}_{p=0}\alpha_{p}\int_{-\infty}^{+\infty}
\frac{dp_{3}}{4\pi^{2}}E_q,
\end{eqnarray}
appearing in (\ref{NE13b}). Here, $E_{q}$ is given in (\ref{NE10b}).
Using the definition of $\alpha_{p}=2-\delta_{p0}$, we get
\begin{eqnarray}\label{appB2}
\lefteqn{\hspace{-0.5cm}\Omega_{\mbox{\tiny{eff}}}^{(1)}(m;eB,T=\mu=0)=-3\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|
}\nonumber\\
&&\times
\int_{-\infty}^{+\infty}\frac{dp_{3}}{4\pi^{2}}\left(\sum\limits_{p=0}^{+\infty}2E_q-E_{q}(p=0)\right).
\end{eqnarray}
The above integral can be dimensionally regularized using
\begin{eqnarray}\label{appB3}
\int_{-\infty}^{+\infty}\frac{d^{d}p}{(2\pi)^{d}}{(\phi^2+p^2)}^{-\alpha}=
\frac{\Gamma(\alpha-\frac{d}{2})}{(4\pi)^{\frac{d}{2}}\Gamma(\alpha)~\phi^{2\alpha-d}}.
\end{eqnarray}
Setting $\alpha=-1/2$, $d=1-\epsilon$, with $\epsilon$ a small and
positive number, we arrive first at
\begin{eqnarray}\label{appB4}
\lefteqn{\hspace{-0cm}\Omega_{\mbox{\tiny{eff}}}^{(1)}(m;eB,T=\mu=0)=\frac{3\Gamma(-1+\frac{\epsilon}{2})}{4\pi^{2}}
}\nonumber\\
&&\times\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|^{2}\left\{\sum^{\infty}_{p=0}\frac{2}{(x_q+p)^{-1+\frac{\epsilon}{2}}}
-\frac{1}{x_q^{-1+\frac{\epsilon}{2}}}\right\},\nonumber\\
\end{eqnarray}
where $x_q\equiv\frac{m^{2}}{2|qeB|}$. Replacing the sum over the
Landau levels $p$ with the generalized Riemann-Hurwitz
$\zeta$-function \cite{gradshteyn}, $\zeta\left(s,a\right)\equiv
\sum_{p=0}^{\infty}\left(a+p\right)^{-s}$, we get
\begin{eqnarray}\label{appB6}
\lefteqn{\hspace{-0.8cm}\Omega_{\mbox{\tiny{eff}}}^{(1)}(m;eB,T=\mu=0)=\frac{3}{8\pi^{2}}\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}(2|qeB|)^{2-\frac{\epsilon}{2}}
}\nonumber\\
&&\hspace{-0.5cm}\times\Gamma(-1+\frac{\epsilon}{2})
\left\{\zeta\left(-1+\frac{\epsilon}{2},x_q\right)-\frac{1}{2x_q^{-1+\frac{\epsilon}{2}}}\right\}.
\end{eqnarray}
Expanding the above expression in the orders of $\epsilon$ up to
${\cal{O}}(\epsilon)$ and eventually taking the limit $\epsilon\to
0$, we arrive at
\begin{eqnarray}\label{appB7}
\lefteqn{\Omega_{\mbox{\tiny{eff}}}^{(1)}(m;eB,T=\mu=0)}\nonumber\\
&&=\lim\limits_{\epsilon\to
0}\frac{3}{4\pi^{2}}\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|^{2}\left\{
\frac{(1+6x_q^2)}{3\epsilon}\right. \nonumber\\
&&\left.+\frac{(1-\gamma_E)(1+6x_{q}^{2})}{6}-x_q\ln
x_q-2\zeta'(-1,x_q)\right.\nonumber\\
&&\hspace{0cm}\left.-\frac{1}{6}\ln\left(2|qeB|\right)-x_{q}^{2}\ln\left(2|qeB|\right)\right\}.
\end{eqnarray}
Here, we have used the polynomial expansion of
$\zeta(-1,x_q)=-\frac{1}{2}\left(\frac{1}{6}-x_q+x_q^{2}\right)$ and
the notation
\begin{eqnarray}\label{appB8}
\zeta'(-1,x_q)\equiv\frac{d\zeta(s,x_q)}{ds}\bigg|_{s=-1}.
\end{eqnarray}
In (\ref{appB7}), $\gamma_{E}\simeq 0.577$ is the Euler-Mascheroni
constant. To eliminate the divergent term, proportional to
$\epsilon^{-1}$ in (\ref{appB7}), we use the method introduced in
\cite{providencia2008}, and add/subtract to
$\Omega_{\mbox{\tiny{eff}}}^{(1)}(m;eB,T=\mu=0)$ the contribution of
the vacuum pressure
\begin{eqnarray}\label{appB9}
P_{0}=2N_{c}N_{f}\int
\frac{d^{3}\mathbf{p}}{(2\pi)^{3}}\left(\mathbf{p}^{2}+m^{2}\right)^{1/2},
\end{eqnarray}
where $N_{c}$ and $N_{f}$ are the number of colors and flavors,
respectively. But before doing this, we will first bring $P_{0}$ into
an appropriate form. Using (\ref{appB3}) with $\alpha=-1/2$, setting
$d=3-\epsilon$, and eventually expanding the resulting expression in
the orders of $\epsilon$ up to ${\cal{O}}(\epsilon)$, the vacuum
pressure, $P_{0}$, can be brought in the form
\begin{eqnarray}\label{appB10}
P_{0}=\lim\limits_{\epsilon\to
0}\left\{\frac{N_{c}N_{f}m^{4}}{8\pi^{2}}\left(\frac{(-3+2\gamma_{E})}{4}-\frac{1}{\epsilon}+\frac{\ln
m^{2}}{2}\right)\right\}.\nonumber\\
\end{eqnarray}
Replacing, according to the definition of $x_{q}$, $m^{2}$ with
$m^{2}=2|qeB|x_{q}$, and $N_{f}$ with a summation over $q$, we
arrive at
\begin{eqnarray}\label{appB11}
P_{0}&=&\lim\limits_{\epsilon\to
0}\bigg[-\frac{3}{4\pi^{2}}\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|^{2}\left(\frac{x_{q}^{2}(3-2\gamma_{E})}{2}\right.\nonumber\\
&&\left.+\frac{2x_{q}^{2}}{\epsilon}-x_{q}^{2}\ln x_{q}-x_{q}^{2}\ln
(2|qeB|)\right)\bigg],
\end{eqnarray}
where $N_{c}=3$ is chosen. Equivalently, $P_{0}$ can be evaluated
using a sharp cutoff $\Lambda$ \cite{providencia2008},
\begin{eqnarray}\label{appB12}
P_{0}&=&-\frac{3}{4\pi^{2}}\bigg[m^{4}\ln\left(\frac{\Lambda+\sqrt{\Lambda^{2}+m^{2}}}{m}\right)
\nonumber\\
&&-\Lambda(2\Lambda^{2}+m^{2})\sqrt{\Lambda^{2}+m^{2}}\bigg].
\end{eqnarray}
Adding and subtracting $P_{0}$ to $\Omega_{\mbox{\tiny{eff}}}^{(1)}$
from (\ref{appB7}), we finally get
\begin{widetext}
\begin{eqnarray}\label{appB13}
\lefteqn{\Omega_{\mbox{\tiny{eff}}}^{(1)}(m;eB,T=\mu=0)
=-\frac{3}{2\pi^{2}}\sum\limits_{q\in\{\frac{2}{3},-\frac{1}{3}\}}|qeB|^{2}\left\{\zeta'\left(-1,x_{q}\right)+\frac{x_{q}^{2}}{4}+\frac{x_{q}}{2}(1-x_{q})\ln
x_{q}\right\}
}\nonumber\\
&&
+\frac{3}{4\pi^{2}}\left\{m^{4}\ln\left(\frac{\Lambda+\sqrt{\Lambda^{2}+m^{2}}}{m}\right)-\Lambda(2\Lambda^{2}+m^{2})\sqrt{\Lambda^{2}+m^{2}}\right\}+\mbox{$x_{q}$
independent terms}.
\end{eqnarray}
\end{widetext}
Since $\Omega_{\mbox{\tiny{eff}}}^{(1)}(m;eB,T=\mu=0)$ is a part of
the effective potential in the gap equation with respect to $m$, and
we are only interested in the minima of this potential, we have
neglected the $x_{q}$ (or equivalently $m$) independent terms in
(\ref{appB13}). Adding the tree level and the temperature dependent
parts of the effective action, the full effective action of a
two-flavor magnetized NJL model is given by (\ref{NE14b}).
\end{appendix}
|
{
"timestamp": "2013-01-31T02:02:04",
"yymm": "1206",
"arxiv_id": "1206.6051",
"language": "en",
"url": "https://arxiv.org/abs/1206.6051"
}
|
\section{Introduction}
\subsection{Pinsker's inequality}
The inequality bearing Pinsker's name states that
for two distributions $P$ and $Q$,
\begin{equation}
D(P\Vert Q) \geq \frac{V^{2}( P,Q) }{2},\label{eq:pinsker}
\end{equation}%
where
\begin{equation*}
D(P\Vert Q) =\int \ln\paren{\frac{\d P}{\d Q}} \,\d P
\end{equation*}%
is the Kullback-Leibler divergence of $P$ from $Q$ and
$V(P,Q)=\left\Vert P-Q\right\Vert _{1}$ is their total variation distance.
Actually, the name is a bit of a misattribution, since the explicit form
of (\ref{eq:pinsker}) was obtained by
Csisz{\'a}r \cite{MR0219345} and Kullback \cite{kullback67} in 1967
and is occasionally referred to by their names.
Gradual improvements were obtained by \cite%
{MR0214714,MR1984937,MR0270834,MR0252112,0176.49106,MR0214112,MR1873865,MR0479685,MR0275575}
and others; see \cite{DBLP:conf/colt/ReidW09} for a detailed history and the
\textquotedblleft best possible Pinsker inequality\textquotedblright .
Recent extensions to general $f$-divergences may be found in \cite{MR2808583}
and \cite{DBLP:conf/colt/ReidW09}.
This
inequality has become a ubiquitous tool in
probability \cite{MR815975,MR1739680,marton96},
information theory \cite{MR2099014}, and, more recently, machine
learning \cite{MR2409394}.
It will be useful to define the function $\mathrm{KL}_{2}:(0,1)^{2}%
\rightarrow \lbrack 0,\infty )$ by
\begin{equation*}
\mathrm{KL}_{2}(p,q)=p\ln {\frac{p}{q}}+(1-p)\ln {\frac{1-p}{1-q}}
\end{equation*}%
and the so-called Vajda's tight lower bound $L$ \cite{MR0275575}:
\begin{equation*}
L(v)=\inf_{P,Q:V(P,Q)=v}D(P\Vert Q) .
\end{equation*}%
In \cite{MR1984937} an exact parametric equation of the curve $( v,L(v)) _{0<v<\infty
}$ in $\mathbb{R}^{2}$ was given:
\begin{eqnarray*}
v(t) &=& t\sqprn{ 1-\paren{ \coth t-\frac{1}{t}}^{2}} , \\
L(v(t)) &=&\ln \frac{t}{\sinh t}+t\coth t-\paren{\frac{t}{\sinh t}}^2.
\end{eqnarray*}
Some upper bounds on the KL-divergence in terms of other $f$%
-divergences are known \cite{MR1879596,MR2065310,MR1872472}, and in \cite[Lemma 3.10]{DBLP:conf/ijcai/Even-DarKM07}
it is shown that, under some conditions, $D(P\Vert Q)\le \nrm{P-Q}_1\ln(1/\min Q)$.
The latter
estimate
is vacuous for $Q$ with infinite support.
In
general,
it is impossible to upper-bound $D(P\Vert Q) $ in
terms of $V(P,Q)$, since for every $v\in (0,2]$ there is a pair of
distributions $P,Q$ with $V(P,Q)=v$ and $D(P\Vert Q) =\infty $ \cite{MR1984937}.
However, in many applications, the actual quantity of interest is not $%
D(P\Vert Q)$ for arbitrary $P$ and $Q$, but rather
\begin{equation}
D^*(v,Q)=\inf_{P:V(P,Q)\geq v}D(P\Vert Q) .\label{eq:Ddef}
\end{equation}%
For example, Sanov's Theorem \cite{MR2239987,MR1739680} (which we will
say more about below) implies that the probability that the empirical
distribution $\hat{Q}_{n}$, based on a sample of size $n$, deviates in $\ell
_{1}$ by more than $v$ from the true distribution $Q$ behaves asymptotically
as $\exp ( -nD^*(v,Q)) $.
Throughout this paper, we consider a (finite or $\sigma$-finite)
measure space $(\Omega ,\calF ,\mu )$, and all the distributions in
question will be defined on this space and assumed absolutely continuous
with respect to $\mu $; this set of distributions will be denoted by $%
\mathcal{P}$.
We will consistently use upper-case letters for distributions $P\in \mathcal{%
P}$ and corresponding lower-case letters for their densities $p$ with
respect to $\mu $.
We will use standard asymptotic notation $O(\cdot)$ and $\Omega(\cdot)$.
\subsection{Balanced and unbalanced distributions}
In this paper, we show that for the broad class of ``balanced''
distributions, $D^*(v,Q)= v^2/2 +O(v^4)$, which matches the form of the
bound in (\ref{eq:pinsker}). For distributions not belonging to this class,
we show that
\begin{eqnarray*}
D^*(v,Q)=\frac{v^2}{8\beta(1-\beta)}-O
(v^3),
\end{eqnarray*}
where $\beta$ is a measure of the ``imbalance'' of $Q$ defined below; this
may also be interpreted as a reverse Pinsker inequality.
The \emph{range} of a distribution is
\begin{equation*}
\mathcal{R}(Q)=\left\{ Q(A):A\in \calF \right\} .
\end{equation*}%
A distribution $Q$ has \emph{full range} if $\mathcal{R}(Q)=[0,1]$.
Non-atomic distributions on $\mathbb{R}$ have full range.
The \emph{balance coefficient} of a distribution $Q$ is
\begin{eqnarray*}
\beta = \inf\left\{ x\in\mathcal{R}(Q): x\ge\nicefrac{1}{2} \right\}.
\end{eqnarray*}
A distribution is \emph{balanced} if $\beta=\nicefrac{1}{2}$ and \emph{%
unbalanced} otherwise. In particular, all distributions with full range are
balanced.
Note that the balance coefficient of a discrete distribution $Q$ is bounded by\footnote{
Since we will not use this fact in the sequel, we only give a proof sketch.
The case where $q_{\max}\ge\nicefrac{1}{2}$ is trivial,
so assume $q_{\max}<\nicefrac{1}{2}$.
Consider the following greedy algorithm:
initialize $A$ to be
the empty set and
repeatedly include the heaviest available atom such that $A$'s total mass remains
under $\nicefrac{1}{2}$ (once an atom has been added to $A$, it is no longer ``available'').
If $\omega$ is the first atom
whose inclusion will
bring
$A$'s mass over $\nicefrac{1}{2}$,
either $A\cup\set{\omega}$ or $\Omega\setminus A$ establishes the
bound in (\ref{eq:balqmax}).
}
\begin{eqnarray}
\label{eq:balqmax}
\beta \le \frac{1}{2} + \frac{q_{\max}}{2}
,
\end{eqnarray}
where $q_{\max}=\max_{\omega\in\Omega}q(\omega)$.
Ordentlich and Weinberger \cite{1424321} considered the following
distribution-dependent refinement of Pinsker's inequality. For a
distribution $Q$ with balance coefficient $\beta $, define $\varphi (Q)$ by
\begin{equation*}
\varphi (Q)=\frac{1}{2\beta -1}\ln \frac{\beta }{1-\beta }
\end{equation*}%
(for $\beta =\nicefrac{1}{2}$, $\varphi (Q)=2$). It is shown in \cite%
{1424321} that
\begin{equation}
D(P\Vert Q) \geq \frac{\varphi (Q)}{4}V(P,Q)^{2}
\label{eq:OW}
\end{equation}%
for all $P,Q$, and furthermore, that $\varphi (Q)/4$ is the best $Q$%
-dependent coefficient possible:
\begin{equation}
\inf_{P}\frac{D(P\Vert Q) }{V(P,Q)^{2}}=\frac{%
\varphi (Q)}{4}.\label{eq:OW-tight}
\end{equation}%
Although the left-hand sides of (\ref{eq:Ddef}) and (\ref{eq:OW-tight})
bear a superficial resemblance, the two quantities are quite different (in
particular, the former is constrained by $V(P,Q)\geq v$). While
distribution-independent versions of (\ref{eq:OW}) exist (viz.,
(\ref{eq:pinsker})),
our main result (Theorem \ref{thm:rev-pinsker}) does
not admit a distribution-independent form. Simply put, the result in \cite%
{1424321} yields a lower bound on $D^*(v,Q)$, while we seek to
upper-bound this quantity --- and actually compute it exactly for unbalanced
distributions.
\section{Main results}
We can now state our reverse Pinsker inequality:
\begin{thm}
\label{thm:rev-pinsker} Suppose $Q\in \mathcal{P}$ has balance coefficient $%
\beta $. Then:
\begin{enumerate}
\item[(a)] For $\beta \geq \nicefrac{1}{2}$ and $0<v<1$,
\begin{equation*}
L(v)\leq D^*(v,Q)\leq
\mathrm{KL}_{2}(\beta -\nicefrac{v}{2},\beta ) .
\end{equation*}
\item[(b)] For $\beta >\nicefrac{1}{2}$ and
$0<v<4(\beta-\nicefrac12)$,
\begin{equation*}
D^*(v,Q)=\mathrm{KL}_{2}( \beta -\nicefrac{v}{2},\beta ) .
\end{equation*}
\end{enumerate}
\end{thm}
As a comparison of orders of magnitude, note that
\begin{eqnarray*}
\mathrm{KL}_{2}( \beta -\nicefrac{v}{2},\beta ) &=&\frac{v^{2}}{%
8\beta (1-\beta )}
-\frac{(2\beta -1)v^{3}}{48{\beta }^{2}(1-\beta )^{2}}+O(v^{4}), \\
\mathrm{KL}_{2}( \nicefrac{1}{2}-\nicefrac{v}{2},\nicefrac{1}{2})
&=&\frac{v^{2}}{2}+\frac{v^{4}}{12}+O(v^{6}), \\
L(v) &=&\frac{v^{2}}{2}+\frac{v^{4}}{36}+\Omega(v^{6}),
\end{eqnarray*}%
where the
first two expansions are straightforward and the
last one is well-known \cite{MR1984937}.
Combining the bound of Ordentlich and Weinberger (\ref{eq:OW}) with Theorem %
\ref{thm:rev-pinsker}, we get
\begin{eqnarray*}
\frac{1}{4(2\beta -1)}\ln \frac{\beta }{1-\beta }v^{2} &\leq &D^*(v,Q)
\\
&\le&\mathrm{KL}_{2}( \beta -\nicefrac{v}{2},\beta ) \\
&=&\frac{v^{2}}{8\beta (1-\beta )}-O( v^{3}) .
\end{eqnarray*}%
As a consistency check, one may verify that
\begin{equation*}
\frac{1}{4(2\beta -1)}\ln \frac{\beta }{1-\beta }\leq \frac{1}{8\beta
(1-\beta )}
\end{equation*}%
for $\nicefrac{1}{2}\leq \beta <1$.
\begin{thm}
\label{thm:full-range}
If $Q\in \mathcal{P}$ has full range, then
\begin{equation*}
D^*( v,Q) =L(v),
\qquad
0<v<2
.
\end{equation*}
\end{thm}
\section{Proofs}
We will repeatedly invoke the standard fact that $D(\cdot\Vert\cdot)$ is convex in both arguments
\cite{MR2239987,erven2012}.
Our first lemma provides a structural result for extremal distributions.
Suppose a distribution $Q\in \mathcal{P}$ is given, along with an $A\in
\calF $ and a $0<v\leq 2( 1-Q(A)) $.
Denote by $\mathcal{P}(Q,A,v)$ the set of all distributions $P\in \mathcal{P}$ for which $V(
P,Q) =v$ and
$A=\set{\omega\in\Omega: q(\omega)<p(\omega)}$.
The above ``restriction'' on the range of $v$
derives from the fact that every $P\in\mathcal{P}(Q,A,v)$ must satisfy
$V(P,Q)\le2(1-Q(A))$.
\begin{lem}
\label{lem:PQA}
For all $Q\in \mathcal{P}$, $A\in \calF$ with $%
0<Q(A)<1 $, and $v\in (0,2(1-Q(A))]$, let $P^*\in \mathcal{P}$ be the measure with density
\begin{equation*}
p^*=(a\mathbbm{1}_{A}+b\mathbbm{1}_{\Omega
\setminus A})q,
\end{equation*}%
where
\begin{equation*}
a=1+\frac{v}{2Q(A)},\qquad b=1-\frac{v}{2(1-Q(A))}.
\end{equation*}%
Then $P^*$ belongs to $\mathcal{P}(Q,A,v)$, and $P^*$ is the
unique minimizer of $D(P\Vert Q)$ over $P\in \mathcal{P}(Q,A,v)$.
\end{lem}
\begin{proof}
Obviously, $P^{\ast}$ belongs to $\mathcal{P}(Q,A,v)$.
We claim that
\begin{equation}
\label{eq:PP*Q}
D(P\Vert Q) = D( P\left\Vert P^{\ast
}\right. ) +D(P^*\Vert Q)
\end{equation}
holds
for
all $P\in \mathcal{P}(Q,A,v)$,
whence the lemma follows.
Indeed, putting $B=\Omega\setminus A$ and using the fact that
\begin{eqnarray*}
D(P\Vert P^*) \!\!&=& \!\! D(P\Vert Q)-P(A)\ln a - P(B)\ln b,\\
D(P^*\Vert Q) \!\!&=&\!\! Q(A)a\ln a + Q(B)b\ln b,
\end{eqnarray*}
we see that (\ref{eq:PP*Q}) is equivalent to
the identity
$$ (Q(A)-P(A)+\nicefrac{v}{2})\ln a+(Q(B)-P(B)-\nicefrac{v}{2})\ln b =0,$$
which follows immediately from the elementary fact that
$$ P(A)-Q(A)=Q(B)-P(B)=\nicefrac{v}{2}.$$
\end{proof}
Our next result is that $D^*$ actually has a somewhat simpler form than
the original definition
(\ref{eq:Ddef}).
\begin{lem}
\label{lem:=eps}
For all distributions $Q$ and all $v>0$,
\begin{equation*}
D^*(v,Q)=\inf_{P:V(P,Q)=v}D(P\Vert Q) .
\end{equation*}
\end{lem}
\begin{proof}
For any $\varepsilon>0$, let $P_\varepsilon\in\mathcal{P}$ be such that $V(P_\varepsilon,Q)\ge v$
and
\begin{eqnarray}
\label{eq:Peps}
D(P_\varepsilon\Vert Q)< D^*(v,Q)+\varepsilon
\end{eqnarray}
and define,
for $0\le\delta\le1$,
\begin{eqnarray*}
P_{\varepsilon,\delta} = \delta P_\varepsilon+(1-\delta )Q.
\end{eqnarray*}
Since $V(P_{\varepsilon,\delta},Q)=\delta V(P_{\varepsilon},Q)$,
we may always choose $\overline\delta=\overline\delta(P_\varepsilon)$ so that
$V(P_{\varepsilon,\overline\delta},Q)=v$.
By convexity of $D(\cdot\Vert\cdot)$, we have
\begin{eqnarray*}
D(P_{\varepsilon,\overline\delta}\Vert Q) &\le& \overline\delta D(P_\varepsilon\Vert Q) + (1-\overline\delta)D(Q\Vert Q) \\
&\le& D(P_\varepsilon\Vert Q) \\
&<& D^*(v,Q)+\varepsilon,
\end{eqnarray*}
and hence
\begin{eqnarray*}
\inf_{P:V(P,Q)\ge v}D(P\Vert Q)
=
\inf_{P:V(P,Q)=v}D(P\Vert Q) .
\end{eqnarray*}
\end{proof}
\begin{proof}[Proof of
Theorem
\protect\ref{thm:full-range}]
Below we take the infimum over $\mathcal{P}$ in two steps:
first over $\mathcal{P}(Q,A,v)$ and then over $A\in\mathcal{F}$ satisfying $ Q(A)\le 1-\nicefrac{v}{2}$.
It follows from Lemmas \ref{lem:PQA} and \ref{lem:=eps} that
\begin{eqnarray}
D^*(v,Q) &=& \inf_{P\in\mathcal{P}:V(P,Q)=v}D(P\Vert Q) \nonumber\\
&=& \inf_{A} \;\inf_{P\in\mathcal{P}(Q,A,v)} D(P\Vert Q) \nonumber\\
&=& \inf_{A} \;D(P^*\Vert Q) \nonumber\\
&=& \inf_{A} \;\sqprn{ Q(A)a\ln a+Q(\Omega\setminus A)b\ln b} \nonumber\\
&=& \inf_{A} \;\operatorname{KL}_2(Q(A)+\nicefrac{v}{2},Q(A))
\label{eq:verdu},
\end{eqnarray}
where $P^*$, $a$ and $b$ are as defined in Lemma \ref{lem:PQA}.
Using the fact \cite{MR1984937,MR2817015} that
\begin{eqnarray*}
L(v) = \inf_{0<x<1-\nicefrac{v}{2}}\operatorname{KL}_2(x+\nicefrac{v}{2},x),
\end{eqnarray*}
we have that
$%
D^*(v,Q) = L(v)
$ %
for $Q$ with full range,
which proves
the claim.
\end{proof}
For the proof of Theorem \protect\ref{thm:rev-pinsker}, we will need
additional lemmata,
the first of which
will allow us to restrict our attention to distributions with
binary support. Now it is well known \cite{MR1984937} that for each pair of
distributions $P,Q$, there is a pair of binary distributions $P^{\prime
},Q'$ such that $V( P',Q') =V(P,Q)$
and $D(P'\Vert Q') =D(P\Vert Q) $
(this fact is generalized to general $f$%
-divergences in \cite{MR2817015}). However, in our case $Q$ is fixed whereas
only $P$ is allowed to vary, and so this result is not directly applicable.
Still, an analogue of this phenomenon also holds in our case.
We will consistently use $\pi$ to denote a map $\Omega\to\set{1,2}$ and
for $Q\in\mathcal{P}$,
the notation
$\pi(Q)$ refers to the distribution $(Q(\pi^{-1}(1)),Q(\pi^{-1}(2)))$ on $\set{1,2}$.
For measurable $A\subseteq\Omega$,
the map
$\pi_A:\Omega\to\set{1,2}$
is defined by
$\pi_A^{-1}(1)=A
=\Omega\setminus\pi_A^{-1}(2)
$.
\begin{lem}
\label{lem:bin-enuf} Let $Q\in\mathcal{P}$ be a distribution
whose support
contains at least two points.
Then
\begin{itemize}
\item[(i)]
For any
measurable map $\pi:\Omega\to\set{1,2}$ and any
distribution $P'=( p_{1}',p_{2}') $ on $\set{1,2}$,
there exists a $P\in\mathcal{P}$ such that
$V(P,Q)=V(P',\pi(Q))$ and
$D(P\Vert Q)=D(P'\Vert\pi(Q))$.
In particular,
\begin{eqnarray*}
D^*(v,\pi(Q))\ge D^*(v,Q),
\qquad v>0.
\end{eqnarray*}
\item[(ii)]
For all $v>0$,
there is a measurable
$\pi:\Omega \rightarrow \{1,2\}$ such that
$
D^*(v,Q)=D^*( v,\pi (Q)).
$
\end{itemize}
\end{lem}
\begin{proof}
Let $P'=( p_{1}',p_{2}')$
be a distribution on $\{1,2\}$
and define the distribution $P\in \mathcal{P}$ as the mixture
$$P=p_{1}'Q(\cdot \left\vert \pi ^{-1}(1)\right. )
+ p_{2}'Q( \cdot
\left\vert \pi ^{-1}(2)\right. ) .$$
Then
\begin{eqnarray*}
V(P,Q) &=& \int_\Omega\abs{p(\omega)-q(\omega)}\,\d\mu(\omega) \\
&=& \int_{\pi^{-1}(1)}\abs{p_1'\frac{q(\omega)}{Q(\pi^{-1}(1))}-q(\omega)}\,\d\mu(\omega)
+ \int_{\pi^{-1}(2)}\abs{p_2'\frac{q(\omega)}{Q(\pi^{-1}(2))}-q(\omega)}\,\d\mu(\omega) \\
&=& Q(\pi^{-1}(1))\abs{\frac{p_1'}{Q(\pi^{-1}(1))}-1}
+ Q(\pi^{-1}(2))\abs{\frac{p_2'}{Q(\pi^{-1}(2))}-1} \\
&=& \abs{p_1'-Q(\pi^{-1}(1))} + \abs{p_2'-Q(\pi^{-1}(2))} \\
&=& V(P',\pi(Q))
\end{eqnarray*}
and
\begin{eqnarray*}
D(P\Vert Q) &=& \int_{\Omega} p(\omega)\log\frac{p(\omega)}{q(\omega)}\, \d\mu(\omega) \\
&=& \int_{\pi^{-1}(1)} {p_1'\frac{q(\omega)}{Q(\pi^{-1}(1))}} \log\frac{p_1'q(\omega)/Q(\pi^{-1}(1))}{q(\omega)}\, \d\mu(\omega)
+ \int_{\pi^{-1}(2)} {p_2'\frac{q(\omega)}{Q(\pi^{-1}(2))}} \log\frac{p_2'q(\omega)/Q(\pi^{-1}(2))}{q(\omega)} \,\d\mu(\omega) \\
&=& p_1'\log\frac{p_1'}{Q(\pi^{-1}(1))} + p_2'\log\frac{p_2'}{Q(\pi^{-1}(2))} \\
&=& D(P'\Vert\pi(Q)).
\end{eqnarray*}
Hence,
\begin{eqnarray*}
D^*(v,\pi(Q)) &=& \inf_{P':V(P',\pi(Q))=v} D(P'\Vert \pi(Q)) \\
&=& \inf_{
P=p_{1}'Q(\cdot \, | \, \pi ^{-1}(1))
+ p_{2}'Q( \cdot\, | \, \pi ^{-1}(2))
:
V(P',\pi(Q))=v
} D(P\Vert Q) \\
&\ge&
\inf_{P:V(P,Q)=v} D(P\Vert Q) \\
&=& D^*(v,Q),
\end{eqnarray*}
where the first and last identities follow from Lemma~\ref{lem:=eps}.
This proves (i).
For any $\varepsilon>0$, the proof of Lemma~\ref{lem:=eps}
furnishes a $P_\varepsilon\in\mathcal{P}$ such that $V(P_\varepsilon,Q)=v$ and
$D(P_\varepsilon\Vert Q)<D^*(v,Q)+\varepsilon$.
Define $\pi $ by
\begin{equation*}
\pi (\omega )=%
\begin{cases}
1, & \quad\text{$p_\varepsilon(\omega )<q(\omega )$}, \\
2, & \quad\text{else}.%
\end{cases}%
\end{equation*}%
Then
\begin{eqnarray*}
v=V(P_\varepsilon,Q) &=&
\int_{p_\varepsilon<q}\abs{p_\varepsilon(\omega)-q(\omega)}\,\d\mu(\omega)
+
\int_{p_\varepsilon\ge q}\abs{p_\varepsilon(\omega)-q(\omega)}\,\d\mu(\omega)\\
&=& V(\pi(P_\varepsilon),\pi(Q))
\end{eqnarray*}
and
\begin{equation*}
D(\pi(P_\varepsilon)\Vert\pi(Q))
\le
D(P_\varepsilon\Vert Q)
<
D^*(v,Q)+\varepsilon,
\end{equation*}
where the first inequality follows from the
data processing inequality \cite[Theorem 9]{erven2012}.
Since $\varepsilon>0$ is arbitrary, we have that
$D^*(v,\pi(Q))\le D^*(v,Q)$.
Combining this with the opposite inequality
$D^*(v,\pi(Q))\ge D^*(v,Q)$,
which holds by (i), proves (ii).
\end{proof}
Next, we characterize the extremal $P^*$
satisfying $D(P^*\Vert Q) =D^{\ast}(v,Q)$
in the binary case.
\begin{lem}
\label{lem:opt-q-bin}
Let $Q=( q_{0},1-q_{0}) $ be a binary
distribution with $q_{0}>\nicefrac{1}{2}$ and
$v\in ( 0,2q_{0}] $.
Then the unique $P^*$ satisfying $V( P^*,Q) =v$ and
$D(P^*\Vert Q) =D^*(v,Q)$ is
\begin{equation*}
P^*=\paren{ q_{0}-\frac{v}{2},1-q_{0}+\frac{v}{2}} .
\end{equation*}
\end{lem}
\begin{proof}
By Lemma \ref{lem:=eps}, there are at most two possibilities for $P^{\ast}$,
namely,
\[
P^{\ast}=P_{1}=\paren{ q_{0}-\frac{v}{2},1-q_{0}+\frac{v}{2}}
\]
and
\[
P^{\ast}=P_{2}=\paren{ q_{0}+\frac{v}{2},1-q_{0}-\frac{v}{2}} .
\]
(Actually, if $v>2(1-q_{0})$ then only $P_{1}$ is a valid distribution.)
A second-order Taylor expansion yields
\[
\mathrm{KL}_{2}( q_{0}+x,q_{0}) =\frac{1}{2}\frac{x^{2}}{(
q_{0}+\theta) ( 1-q_{0}-\theta) }%
\]
for some $\theta$ between $0$ and $x.$ Hence
\begin{eqnarray*}
\mathrm{KL}_{2}\paren{ q_{0}-\frac{v}{2},q_{0}}
<\frac{1}{2}
\frac{( \nicefrac{v}{2}) ^{2}}{q_{0}( 1-q_{0})}
<\mathrm{KL}_{2}\paren{ q_{0}+\frac{v}{2},q_{0}}
\end{eqnarray*}
for all $v\in( 0,2( 1-q_{0}) ] ,$ which implies that
$P^{\ast}=P_{1}$.
\end{proof}
\begin{proof}[Proof of Theorem \protect\ref{thm:rev-pinsker} (a)]
The first inequality is an immediate consequence of Lemma~\ref{lem:=eps}.
To prove the second one,
let $Q\in\mathcal{P}$ be a distribution
with balance coefficient $\beta$, and $0<v<1$.
By definition of $\beta$, for all $\varepsilon>0$ there is a measurable $A_\varepsilon\subseteq\Omega$
such that
$\beta\le
Q(A_\varepsilon)
\le\beta+\varepsilon$.
Then Lemma~\ref{lem:bin-enuf}(i) implies that
\begin{eqnarray*}
D^*(v,Q) \le D^*(v,\pi_{A_\varepsilon}(Q))
\end{eqnarray*}
and by taking $\varepsilon$ arbitrarily small,
\begin{eqnarray*}
D^*(v,Q) \le D^*(v,Q'),
\end{eqnarray*}
where $Q'=(\beta,1-\beta)$. Finally, Lemma~\ref{lem:opt-q-bin} implies
that $D^*(v,Q')=\operatorname{KL}_{2}(\beta -\nicefrac{v}{2},\beta)$.
\end{proof}
\begin{lem}
\label{lem:voks}
For every fixed $0<\delta<\nicefrac{1}{2}$,
the binary divergence
$
\mathrm{KL}_{2}(x-\delta ,x)
$
is strictly increasing in $x$ on
$\left[ \nicefrac{1}{2}+\nicefrac{\delta}{2},1\right]$.
\end{lem}
\begin{proof}
Define the function
\begin{equation*}
F(x)
=\mathrm{KL}_{2}(x-\delta ,x).
\end{equation*}%
Since KL-divergence is jointly convex in the distributions,
$F$ is a convex function.
Thus, it is sufficient to
prove that
$F'(x)$
is positive for $x=\nicefrac{1}{2}+%
\nicefrac{\delta}{2}.$
We have
\begin{equation*}
F'(x)
=
\frac{\delta +(1-x)x\ln (1-\frac{\delta }{x}%
)+(1-x)x\ln \frac{1-x}{1-x+\delta }}{(1-x)x}
\end{equation*}%
and
\begin{eqnarray*}
F'(\nicefrac{1}{2}+\nicefrac{\delta}{2})
&=& \frac{4\delta}{1-\delta^2} + 2\ln\frac{1-\delta}{1+\delta}
\;=:\;
G(\delta).
\end{eqnarray*}
Now $G(0)=0$ and
\begin{eqnarray*}
G'(\delta) = 8\paren{\frac{\delta}{1-\delta^2}}^2 >0,
\end{eqnarray*}
which proves the lemma.
\end{proof}
\begin{proof}[Proof of Theorem \protect\ref{thm:rev-pinsker} (b)]
Consider a $Q\in\mathcal{P}$ with balance coefficient $\beta>\nicefrac{1}{2}$ and
$0<v<4(\beta-\nicefrac12)$.
Then
Lemma \ref{lem:bin-enuf}
implies that
\begin{eqnarray*}
D^*(v,Q) &=& \inf_{A\in\calF}\; D^*(v,\pi_A(Q)) \\
&=& \inf_{A\in\calF: Q(A)>\nicefrac{1}{2}}\; D^*(v,(Q(A),1-Q(A))),
\end{eqnarray*}
where the second identity holds because
$D^*(v,(q_0,q_1))=D^*(v,(q_1,q_0))$.
Invoking Lemma~\ref{lem:opt-q-bin}, we have that for $Q(A)>\nicefrac12$,
\begin{eqnarray*}
D^*(v,\pi_A(Q)) = \operatorname{KL}_2\paren{ Q(A)-\frac{v}{2},Q(A)}
\end{eqnarray*}
and hence
\begin{eqnarray*}
D^*(v,Q) &=& \inf_{A: Q(A)>\nicefrac12}\; \operatorname{KL}_2\paren{ Q(A)-\frac{v}{2},Q(A)}.
\end{eqnarray*}
Since $\nicefrac12+\nicefrac{v}{4}\le\beta\le Q(A)$,
we may invoke Lemma~\ref{lem:voks} with $x=Q(A)$ and $\delta=\nicefrac{v}{2}$
to conclude that
$D^*(v,Q) = \operatorname{KL}_2\paren{ \beta-\frac{v}{2},\beta}$.
\end{proof}
\section{Application: convergence of the empirical distribution}
The results in \cite{Berend2013} have bearing on the convergence of the empirical
distribution to the true one in the total variation norm. More precisely,
the paper considers
a sequence of i.i.d. $\mathbb{N}$-valued random variables
$X_{1},X_{2},\ldots$, distributed according to $Q=(q_{1},q_{2},\ldots )$ and
denotes
\begin{equation*}
J_{n}=V( Q,\hat{Q}_{n}) ,\qquad n\in \mathbb{N},
\end{equation*}%
where $\hat{Q}_{n}$ is the empirical distribution induced by the first $n$
observations. Let us recall Sanov's Theorem \cite{MR2239987,MR1739680}, which
yields
\begin{equation*}
-\lim_{n\rightarrow \infty }\frac{1}{n}\ln Q( J_{n}-\mathbb{E}%
J_{n}>\varepsilon ) =D^*(\varepsilon ,Q).
\end{equation*}
Since the map $( X_{1},\ldots ,X_{n}) \mapsto J_{n}$ is $%
\nicefrac{2}{n}$-Lipschitz continuous with respect to the Hamming distance,
McDiarmid's inequality \cite{mcdiarmid89} implies
\begin{equation}
\label{eq:BK}
Q( \abs{ J_{n}-\mathbb{E} J_{n}} >\varepsilon )
\leq 2\exp \paren{ -\frac{n\varepsilon ^{2}}{2}},
\qquad
n\in \mathbb{N},\varepsilon>0.
\end{equation}%
Being a rather general-purpose tool,
in many cases
McDiarmid's bound does
not yield optimal estimates. Since for balanced
distributions $D^*(\varepsilon ,Q)\leq \varepsilon ^{2}/2+O(
\varepsilon ^{4}) $, we see that the estimate in (\ref{eq:BK})
actually has the optimal constant $\nicefrac{1}{2}$ in the exponent. (See
\cite[Theorem 1]{MR1822385} for other instances where the quantity $%
\varepsilon ^{2}/2$ emerges in the exponent.) We also see that McDiarmid's
bound must be suboptimal for unbalanced distributions.
The exponential decrease of $Q( \left\vert J_{n}-\mathbb{E} J_{n}\right\vert >\varepsilon ) $ implies that $J_{n}-\mathbb{E} J_{n}$
tends to zero almost surely. We should note that $\mathbb{E} J_{n}$ will tend
to zero but the rate of convergence may be arbitrarily slow. In \cite%
{Berend2013} it was shown that
\begin{equation*}
\mathbb{E} J_{n}\leq n^{\nicefrac{\textrm{-}1}{2}}\sum_{j\in \mathbb{N}%
}q_{j}^{\nicefrac{1}{2}}
\end{equation*}%
and that for $Q$ with finite support of size $k$,
\begin{equation*}
\mathbb{E} J_{n}\leq \paren{ \frac{k}{n}} ^{\nicefrac{1}{2}}.
\end{equation*}%
In greater generality, it was shown that
\begin{equation*}
{\frac{1}{4}}(\Lambda _{n}-n^{\nicefrac{\textrm{-}1}{2}})\leq \mathbb{E} J_{n}\leq \Lambda _{n},\qquad n\geq 2,
\end{equation*}%
where
$$
\Lambda _{n}(Q)={n^{\nicefrac{\textrm{-}1}{2}}\sum_{q_{j}\geq 1/n}q_{j}^{%
\nicefrac{1}{2}}}
+
2\sum_{q_{j}<1/n}q_{j}
$$
tends to zero for $n$ tending to infinity, although the rate at which $%
\Lambda _{n}(Q)$ decays may be arbitrarily slow, depending on $Q$.
\section*{Acknowledgements}
We thank L{\'a}szl{\'o} Gy{\"o}rfi and Robert Williamson for helpful
correspondence, and in particular for bringing \cite{MR1822385} to our
attention. We are grateful to Sergio Verd\'u for a careful reading
of the paper and useful suggestions.
The comments of the anonymous referees have greatly contributed to the quality of this paper. In particular the proof of
Theorem~\ref{thm:full-range} has been considerably simplified due to their comments.
\bibliographystyle{plain}
|
{
"timestamp": "2014-02-21T02:05:10",
"yymm": "1206",
"arxiv_id": "1206.6544",
"language": "en",
"url": "https://arxiv.org/abs/1206.6544"
}
|
\section{\label{emfield}Theoretical representation of electric field at the focal plane due to tight focusing of polarized light propagating through stratified media}
As per the Debye-Wolf theory \cite{wolf59, rich59, bliop11}, an incident collimated Gaussian beam is decomposed into a superposition of plane waves having an infinite number of spatial harmonics. After focusing by a high NA lens, the resulting field amplitude can be related to the incident field by the action of a transfer function that can be written as $A = R_z(-\phi)R_y(\theta)R_z(\phi)$, where $R_i(\alpha),~i=x, y, z$ represents the SO(3) rotation matrix around the $i$ axis by angle $\alpha$. $\phi$ could be understood as the azimuthal angle, while $\theta$ is the polar angle defined with respect to $x$ and $z$ axis of the laboratory frame respectively. The coordinate system we use is shown in Fig.~\ref{coord}.
For focusing into stratified media one needs to take into account the polarization dependence of the field propagating in the media. Thus $A$ needs to incorporate $T_s~(R_s)$ and $T_p~(R_p)$ -- the Fresnel transmission (reflection) coefficients (generally complex) which include the multiple interface contributions for $s$ and $p$ polarizations respectively. Then, the resultant field amplitude $\vec{E}_{res}(\theta,\phi)$ can be written in terms of the incident amplitude $\vec{E}_{inc}(\theta,\phi)$ as
\begin{equation}
\label{fieldinout}
\vec{E}_{res}(\theta,\phi)=A\vec{E}_{inc}(\theta,\phi),
\end{equation}
where the transfer function $A$ is given by
\begin{widetext}
\begin{eqnarray}
\label{transfermatrix}
A_{1,j}^t &= R_z(-\phi)R_y(\theta)TR_z(\phi) = \left[
\begin{array}{ccc}
\ \cos \phi &\ -\sin \phi &\ 0\ \\
\ \sin \phi &\ \cos \phi &\ 0\ \\
\ 0 &\ 0 &\ 1\ \\
\end{array}
\right ] \left[
\begin{array}{ccc}
\ \cos\theta \ &\ 0 & \ -\sin\theta \\
\ 0\ &\ 1 &\ 0 \\
\ \sin \theta\ &\ 0 &\ \cos\theta\\ \end{array}
\right ] \left[
\begin{array}{ccc}
\ T_p &\ 0 &\ 0\ \\
\ 0 &\ T_s &\ 0 \\
\ 0 &\ 0 &\ T_p\\
\end{array}
\right ] \left[
\begin{array}{ccc}
\ \cos \phi &\ \sin \phi &\ 0\ \\
\ -\sin \phi &\ \cos \phi &\ 0\ \\
\ 0 &\ 0 &\ 1\ \\
\end{array}
\right ] \nonumber \\
&= \left[
\begin{array}{ccc}
\ a - b\cos 2\phi &\ -b\sin 2\phi &\ c\cos\phi\ \\
\ -b\sin 2\phi &\ a + b\cos 2\phi &\ c\sin\phi\ \\
\ -c\cos\phi &\ -c\sin\phi &\ a-b\ \\
\end{array}
\right ].
\end{eqnarray}
\end{widetext}
\begin{figure}[]
\centering{\includegraphics[scale=0.4]{4.pdf}}
\caption[i0i2comp]{(Color online) Coordinate system used in the field analysis.}
\label{coord}
\end{figure}
For the forward-propagating case, the coefficients of $A$ are given by $a = \frac{1}{2} \left (T_s+T_p \cos\theta \right )$, $b = \frac{1}{2}\left (T_s-T_p \cos\theta \right )$, and $c = T_p\sin\theta$. Note that, in general, $\vec{E}_{res}(\theta,\phi)$ would be a superposition of forward and backward propagating waves in the stratified media, though the dominant contribution would come from the forward propagating waves. In contrast, for the backward propagating waves the coefficients in eq.~\ref{transfermatrix} would be modified with $\theta$ replaced by $\pi - \theta$, and the Fresnel reflection coefficients $R_s$ and $R_p$ being used instead of the transmission ones. Then, the final field can be obtained by integrating Eq.~\ref{fieldinout} over $\theta$ and $\phi$, so that we finally have
\begin{eqnarray}\label{polarint}
\vec{E}(\rho,\psi,z)&=& i\frac{kfe^{-ikf}}{2\pi}\int_0^{\theta_{max}}\int_0^{2\pi}
\vec{E}_{res}(\theta,\phi)e^{ikz\cos\theta}\nonumber\\& \times & e^{ik\rho\sin\theta\cos(\phi-\psi)}
\sin\theta\>d\theta\,d\phi,
\end{eqnarray}
where $r$ is set to $f$ -- the focal length of the lens, and the limit for the $\theta$ integral is set by the numerical aperture of the microscope objective.
The cylindrical coordinate system is chosen for the convenience it offers to track the polarization of the light beam at the output of a high numerical aperture objective, where it is completely modified from the incident polarization \cite{roh05}. For an incident linearly polarized beam of light (polarized along $x$ direction represented by a Jones vector $\left[\ 1\ 0\ 0\ \right]^T$), the electric field can be written from eq.~\ref{polarint} in matrix form as
\begin{eqnarray}\label{fieldout}
\left[
\begin{array}{c}
{E_x}\\
{E_y}\\
{E_z}\\
\end{array}
\right ] &=& C \left[
\begin{array}{lll}
I_0 + I_2\cos 2\psi & I_2\sin 2\psi & 2i I_1\cos\psi \\
I_2\sin 2\psi & I_0 - I_2\cos 2\psi & 2iI_1\sin\psi \\
-2iI_1\cos\psi & -2iI_1\sin\psi & I_0+I_2 \\
\end{array}
\right ]\nonumber\\ && \times \left[
\begin{array}{c}
{1}\\
{0}\\
{0}\\
\end{array}
\right ]
= C
\left[
\begin{array}{c}
{I_0+I_2\cos 2\psi }\\
{I_2\sin 2\psi }\\
{-i2I_1 \cos \psi }\\
\end{array}
\right ].
\end{eqnarray}
Note that this is a general expression that would work for both transmitted and reflected components. More specifically, the values for the transmitted and reflected components of $I_0(\rho),~I_1(\rho)$ and $I_2(\rho)$ would be (suffixes $t$ and $r$ imply transmitted and reflected respectively) given by
\begin{widetext}
\begin{eqnarray}
\label{transmisseqn}
I^t_0(\rho)=\int_0\limits^{min(\theta_{max},\theta_c)}E_{inc}(\theta)
\sqrt{\cos\theta}(T^{(1,j)}_s+T^{(1,j)}_p\cos\theta_j)J_0(k_1\rho\sin\theta)e^{ik_jz\cos\theta_j}\sin\theta\>d\theta, \nonumber
\end{eqnarray}
\begin{eqnarray}
I^t_1(\rho)=\int_0\limits^{min(\theta_{max},\theta_c)}E_{inc}(\theta)
\sqrt{\cos\theta}T^{(1,j)}_p\sin\theta_j
J_1(k_1\rho\sin\theta)e^{ik_jz\cos\theta_j}\sin\theta\>d\theta, \nonumber
\end{eqnarray}
\begin{eqnarray}
I^t_2(\rho)=\int_0\limits^{min(\theta_{max},\theta_c)}E_{inc}
(\theta)\sqrt{\cos\theta}(T^{(1,j)}_s-T^{(1,j)}_p\cos\theta_j) J_2(k_1\rho\sin\theta)e^{ik_jz\cos\theta_j}\sin\theta\>d\theta,
\end{eqnarray}
\end{widetext}
and
\begin{widetext}
\begin{eqnarray}
I^r_0(\rho)=\int_0\limits^{min(\theta_{max},\theta_c)}E_{inc}(\theta)\sqrt{\cos\theta}
(R^{(1,j)}_s-R^{(1,j)}_p\cos\theta_j)J_0(k_1\rho\sin\theta)e^{-ik_jz\cos\theta_j}\sin\theta\>d\theta, \nonumber
\label{reflecteqn}
\end{eqnarray}
\begin{eqnarray}
I^r_1(\rho)=\int_0\limits^{min(\theta_{max},\theta_c)}E_{inc}(\theta)\sqrt{\cos\theta}
R^{(1,j)}_p\sin\theta_j J_1(k_1\rho\sin\theta)e^{-ik_jz\cos\theta_j}\sin\theta\>d\theta, \nonumber
\end{eqnarray}
\begin{eqnarray}
I^r_2(\rho)=\int_0^{min(\theta_{max},\theta_c)}E_{inc}(\theta)\sqrt{\cos\theta}
(R^{(1,j)}_s+R^{(1,j)}_p\cos\theta_j) J_2(k_1\rho\sin\theta)e^{-ik_jz \cos\theta_j}\sin\theta\>d\theta,
\end{eqnarray}
\end{widetext}
where the $\phi$ integrals have been carried out and are related to Bessel functions $J_n$.
\section{\label{160umprop}Study of radial intensity distribution in a conventional optical trap with a single RI interface}
In this section, we study the variation of radial intensity distribution in a conventional optical tweezers system having a single RI interface in the forward direction of propagation of light. This occurs since most cover slips used in optical trapping are generally refractive index (RI) matched with the microscope immersion oil (RI 1.515), and have thickness between 130 -- 160 $\mu$m so as to obtain high axial trapping depth and also to reduce the effects of spherical aberrations inside the sample.
\begin{figure}[!h]
\centering{\includegraphics[scale=0.4]{5.pdf}}
\caption[i0i2comp]{(Color online) Plot of the total intensity from Eq.~\ref{intensitylinpolar} as a function of beam propagation in the $z$-direction for a single RI interface at (a) the beam focus, (b) an axial distance of 1 $\mu$m away from the focus, and (c) an axial distance of 2 $\mu$m away from the focus.}
\label{intensityplot}
\end{figure}
The total intensity distribution $I(\rho)$ for input $x$-polarized light, considering a superposition of both transmitted and reflected components, is
given by \begin{equation}
I(\rho) = \left|I_0\right|^2 + \left|I_2\right|^2 \pm 2\,{\bf Re}(I_0I_2^{\star})\cos 2\psi + 2 \left|I_1\right|^2 (1 \pm \cos 2\psi).
\label{intensitylinpolar}
\end{equation}
Fig.~\ref{intensityplot} shows the radial ($x-y$) variation of the total intensity as given by Eq.~\ref{intensitylinpolar} at different axial ($z$) distances inside the sample chamber for standard cover slips. The sample thickness in the sample chamber is 20 $\mu$m, and the geometrical beam focus is at an axial distance of 13 $\mu$m inside the chamber. As the off-axis values of the linear diattenuation term $\mathfrak{D}(\rho) = {\bf Re}(I_0I_2^{\star}) + \left|I_1\right|^2$ are weak in such cases as shown in Fig.~1 in the main manuscript, the intensity is confined almost entirely in the center. It is observed that the focal spot shows the typical Gaussian structure with weak Airy lobes in the sides, while at $z=1$ and $2~\mu$m, the beam diverges in a manner typical of TEM$_{00}$ Gaussian beams, and one observes a reduction in height of the central lobe accompanied by an increase of the FWHM of the lobe with increasing axial distance. The Airy lobes are also smeared out when one moves away from the focus, making this similar to the classic instance of Gaussian beam propagation in air after focusing by a lens (also a single glass-air interface). Particles are thus trapped only in the center, and since the variation of intensity as a function of input polarization is also rather weak, it merely leads to a slight elongation of the focal spot as has been mentioned in literature \cite{roh05}, and does not therefore lead to transportation of particles. Indeed we typically observe clumping of particles in the central region of the beam with time as the sample solution is exposed to the trapping laser \cite{hal12}. Note that the value of $\mathfrak{D}$ also increases with axial distance leading to higher spread of the focal spot. This is the phenomenon that is referred to as spherical aberration which leads to the weakening of axial trapping at large axial distances inside the sample chamber.
It is also trivial to note that increasing the thickness of cover slips would not change the radial intensity distribution in this case since there is a single RI interface that is being encountered by the forward propagating light.
\section{\label{160umprop}Study of axial intensity distribution inside sample chamber for our experimental system}
An additional feature of the electric field inside the sample chamber is the formation of axial fringes due to back-reflected waves from the top slide of our sample chamber. This is shown in the $xz$ plot in Fig.~\ref{axialdist1}. It can be seen that no fringes are seen in the absence of a top slide (Fig.~\ref{axialdist1}a). With a top slide, the location and separation of axial fringes depends on the thickness of the water layer and also on the position of the focus with respect to the top glass slide - water interface. The figures were generated with focal spot 13 $\mu$m inside the sample chamber. Note that the direction of the beam is reversed in these figures compared to ones reported in our manuscript. Off-axis trapping occurs only in the presence of a top slide in the sample chamber where single particles are trapped at the intersection of a radial maxima and an axial maxima. Also, axial fringes are located near the top slide only and die away quickly as one goes farther into the sample solution. This is intuitively understandable considering the fact that we are working with very fast diverging Gaussian beams in this case, and a constructive superposition could be achieved only when the incident beam and reflective surface are very close (up to within 4-5 $\mu$m).
\begin{figure}[h!t!]
\centering
\centering{\includegraphics[scale=0.4]{6.pdf}}
\caption{(a) $xz$ profile of the field inside the sample chamber without a top slide. (b) Axial fringes produced due to reflections from top slide ($0<x<10~\mu$m,$0<z<26~\mu$m). The intensity color bar on the right axes is given in logarithmic scale.}
\label{axialdist1}
\end{figure}
\section{\label{250umthick}Study of radial intensity distribution inside sample chamber for different cover slip thicknesses}
\begin{figure}[h!t!]
\centering{\includegraphics[scale=0.35]{7.pdf}}
\caption[]{(Color online) Simulation of the radial variation of total intensity at axial distance 1 $\mu$m away from the focus for cover slips (RI 1.575) having thickness (a) 160, (b) 200, (c) 250, and (d) 300 $\mu$m.}
\label{intensitythick}
\end{figure}
Since we obtain these effects with a stratified media, an interesting exercise is to study the effect of variation of the thickness of one of the layers of the stratified media - viz.~that of the cover slip - in the radial intensity pattern. This is depicted in Fig.~\ref{intensitythick}. The four sub-plots in the figure are for cover slip thicknesses 160, 200, 250, and 300 $\mu$m respectively (all having RI 1.575) and the radial intensity cross-section are taken at $x-y$ planes at an axial distance of 1 $\mu$m from the beam focus in each case. It is observed that with increase in cover slip thickness, the intensity in the side lobes is higher, with the intensity for 300 $\mu$m thick cover slips being around three times higher than that for 160 $\mu$m thick cover slips. This signifies that particles could be trapped in the side lobes at higher axial distances, i.e. an increase in axial depth in off-axis trapping with increase in thickness. This should thus lead to larger separation of side lobes where stable off-axial trapping would be achieved. Thus, larger distances for particle transportation could be possible using thicker cover slips. However, the limit to the thickness would be set by the maximum allowable focal depth of the trapping objective. It can also be expected that increasing the RI contrast of the stratified media would lead to very similar effects. However, these effects are being investigated presently.
|
{
"timestamp": "2013-02-07T02:01:00",
"yymm": "1206",
"arxiv_id": "1206.6518",
"language": "en",
"url": "https://arxiv.org/abs/1206.6518"
}
|
\section{Introduction}
Cross sections for vector boson production are relatively clean observables at hadron colliders. The differential $p_T$ spectra for photons, $W$ bosons and $Z$ bosons, in particular, provide excellent benchmarks to test the standard model as well as to measure parton-distribution functions (PDFs). An important application of such measurements is to compare and validate different precision calculations, performed at fixed order or including resummation. Vector boson production, therefore, gives us a rare handle to gauge the importance of higher order perturbative effects and power corrections. In this paper, we perform such a comparison on the high $p_T$ photon~\cite{Aad:2011tw} and $W$ boson spectra~\cite{Aad:2011fp} measured by the {\sc atlas} collaboration at the LHC, using around 35 pb${}^{-1}$ data.
Direct (or prompt) photon production is the production of a hard photon in association with a jet. The cleanest direct photon observable is the inclusive photon $p_T$ spectrum, which can be measured independently of any jet definition. At low $p_T$,
there is a large background from $\pi^0$ and other hadronic decays, which are often corrected for
by demanding that the photon be isolated. In the {\sc atlas} study~\cite{Aad:2011tw}, the isolation criterion was that there should be
less than 4 GeV of energy in a cone of radius $R=0.4$ around the photon. An advantage
of studying the direct photon spectrum at high $p_T$ is that there is little background of a hard photon
coming from background processes and isolation becomes unnecessary. Formally, the backgrounds
provide only power corrections in this region.
$W$ and $Z$ production have smaller cross sections than photons, especially after paying the cost of a branching ratio to leptons, but do not require isolation. The $W$ spectrum is particularly challenging to measure since it requires an understanding of the missing energy, which unlike the lepton $p_T$, requires mastery of systematic effects over the entire detector.
The photon, $W$ and $Z$ production rates have been known at the next-to-leading order (NLO) for some time~\cite{Aurenche:1983ws,Aurenche:1987fs,Gordon:1993qc,Ellis:1981hk,Arnold:1988dp,Gonsalves:1989ar}. In this paper, we take leading order (LO) to refer to the leading order in which the vector boson $V$ has non-zero $p_T$. So this is a tree-level $2\to 2$ scattering process. NLO is one order beyond this, which includes 1-loop corrections to the $2 \to 2$ processes as well as $2\to3$ real emission graphs. While the inclusive $V$ production rates are known at NNLO, the differential $p_T$ spectra are only known at NLO. These corrections are implemented in Monte Carlo integration programs to provide the NLO distributions, such as {\sc qt}~\cite{qt}, {\sc mcfm}~\cite{mcfm}, {\sc fewz}~\cite{Melnikov:2006kv,Gavin:2010az}, and {\sc dynnlo}~\cite{Catani:2009sm}.
Beyond NLO, the theoretical calculation of the vector boson spectrum is extremely challenging, and the NNLO result is not yet known. In the absence of this result, one can improve on NLO by adding in partial results at higher orders. In some cases, such as at low $p_T$, this is absolutely critical. The fixed-order calculation diverges at small $p_T$ so one needs to resum logarithms of the form $\ln(p_T/M_V)$ to get even qualitative agreement with data. The resummation at low $p_T$ has been performed at the next-to-next-to-leading logarithmic level (NNLL)~\cite{Balazs:1997xd,Bozzi:2010xn,Becher:2010tm}.
Also at very high $p_T$, large
logarithms arise, now of the form $\ln(1-p_T/p_T^{\mathrm{max}})$, where $p_T^{\mathrm{max}}$ is the maximum kinematically possible transverse momentum for the vector boson at a given rapidity. For the photon $p_T^{\mathrm{max}} = \frac{E_{\mathrm{CM}}}{2 \cosh y}$, where $E_{\mathrm{CM}}$ is the machine center-of-mass energy (7 TeV for the 2010-2011 LHC run) and $y$ is the photon's rapidity.
The approach to improving on the fixed-order NLO calculation at high $p_T$ discussed
in~\cite{Laenen:1998qw,Becher:2009th,Becher:2011fc} was to expand around the limit $p_T = p_T^{\mathrm{max}}$.
This is the {\it machine threshold limit}.
When $p_T = p_T^{\mathrm{max}}$, there is only phase space for the vector boson to be recoiling
against a single parton, which also has $p_T = p_T^{\mathrm{max}}$. If the boson has slightly less $p_T$, then
the recoiling hadronic radiation must be jet-like, with the partons in the jet being either collinear
or soft. Thus it is natural to describe the region near the machine threshold using Soft-Collinear Effective Theory (SCET)~\cite{Bauer:2000yr,Bauer:2001yt,Beneke:2002ph}.
Using traditional methods, the threshold resummation for $W/Z$ production at large $p_T$ was performed at NLL accuracy in~\cite{Kidonakis:1999ur,Kidonakis:2003xm,Gonsalves:2005ng}. Using SCET, the accuracy was increased to NNLL in~\cite{Becher:2009th,Becher:2011fc}. The effective theory approach simplifies the computations, and having operator definitions of the various ingredients of the factorization theorem lets us recycle known results, such as the 2-loop jet function, computed for other applications. This greatly reduces the amount of new analytical results needed. Nevertheless, also in the traditional formalism, the results were recently extended to NNLL accuracy~\cite{Kidonakis:2012su,Kidonakis:2011hm} (although only the NNLO fixed-order expansion of the resummed result was computed in these papers).
In practice, the threshold logarithms are important well away from the machine threshold because of the rapid fall-off of the PDFs towards larger values of the momentum fraction $x$, which ensures that most of the cross section comes from a region near the partonic threshold~\cite{Appell:1988ie,Catani:1998tm}. To what extent this dynamical enhancement of the threshold is effective was analyzed in detail in~\cite{Becher:2007ty}. There is a simple phenomenological argument why it should hold: in events with a 300 GeV gauge boson, there is almost always a jet with $p_T\sim$ 300 GeV recoiling against it. That this jet is highly collimated and nearly massless indicates that the phase space region generating the large logarithms relevant for the vector boson $p_T$ spectrum is important. Indeed, in cases such as inclusive Drell-Yan and Higgs production, where the NNLO corrections are known, it is found that 80\,--\,90\% of the perturbative corrections to the cross section arise from the threshold terms, even for Higgs production, where the fall-off of the PDFs is not very strong. We thus expect our resummed results to provide a good approximation to the full NNLO result.
\section{Effective Field Theory approach}
The effective field theory allows us to obtain logarithmic contributions to the vector boson $p_T$ spectrum which supplement the exact NLO distribution, computed in full QCD. These logarithmic terms arise from the threshold region, where the vector boson has the kinematically maximal transverse momentum. In this region, the jet recoiling against the vector boson is nearly massless. The formal derivation of these threshold terms is performed in the machine threshold limit, where $x\to 1$ for both PDFs. However, once the logarithms are extracted they can be used as additional information about the cross section in the kinematic region where $x$ has more reasonable values. That is, the same threshold logarithms are present for any $x$, since they come from a perturbative calculation in QCD which factorizes from the non-perturbative PDFs. The only difference is that away from the machine threshold, we are no longer guaranteed that the threshold terms dominate the hadronic cross section parametrically. In practice they still give rise to the bulk of the cross section thanks to the dynamical threshold enhancement discussed above. Physically, the threshold logarithms are associated with collinear radiation in the recoiling jet or soft radiation coming from the jet or the incoming partons. A method to sum these logarithms to all orders in perturbation theory using the renormalization group (RG) in SCET was developed in~\cite{Becher:2006nr,Becher:2006mr}. Its application to direct photon production was discussed in detail in~\cite{Becher:2009th}. Here we will only briefly summarize the method.
A simple variable to use for the expansion near the machine threshold is $M_X$, the mass of everything-but-$V$, where $V$ refers to the vector boson ($\gamma,W$ or $Z$). In terms of the proton momenta $P_1^\mu$ and $P_2^\mu$ and the vector boson momentum $q^\mu$,
\begin{equation}
M_X^2 = (P_1+P_2 -q)^2\,.
\end{equation}
Since $P_1$ and $P_2$ are fixed, $M_X$ is determined completely by the momentum of the vector boson. As its transverse
momentum approaches its maximum allowed value at fixed rapidity, $M_X \to 0$.
To understand the relevant degrees of freedom, it is helpful also to consider the partonic version of
$M_X$, called $m_X$. This is defined as
\begin{equation}
m_X^2 = (p_1+p_2 -q)^2\,,
\end{equation}
where $p_1^\mu = x_1 P_1^\mu$ and $p_2^\mu = x_2 P_2^\mu$ are the momenta of the partons coming out of the protons which participate in the hard interaction. Taking $m_X\to 0$ is called the {\it partonic threshold limit}. Obviously, $M_X \to 0$ implies $m_X \to 0$. The partonic $m_X$ is like $M_X$ without including the beam remnants. Away from the machine threshold, the beam remnants make $M_X$ large while $m_X$ can remain small. Thus the logarithms we actually expect to be important in affecting the vector boson $p_T$ spectrum beyond NLO can be deduced by considering the theoretically simpler but less physical partonic threshold limit.
Near the partonic threshold, the vector boson must be recoiling against a jet and there is only phase space for the jet to be nearly massless. So then we can write
\begin{equation}
m_X^2 = (p_J + k_S)^2 \approx p_J^2 +2E_J k\,,
\end{equation}
where $p_J^\mu$ and $k_S^\mu$ are the collinear and soft momenta in the jet,
$E_J$ is the jet energy and $k=p_J\cdot k_S/E_J$. It is because of this decomposition that the logarithmic terms we will extract come from either collinear effects ($p_J^2 \to 0$) or soft effects $k\to 0$. SCET implements the structure of the soft and collinear emissions on the Lagrangian level, using different fields to describe the soft and collinear partons. Via a field redefinition, the two sectors can be decoupled, after which the soft emissions are obtained from soft Wilson lines running along the directions of large momentum.
\begin{figure}[t!]
\begin{center}
\psfrag{p1}[B]{$p_1$}\psfrag{p2}[t]{$p_2$}\psfrag{pJ}[t]{$p_J$}
\psfrag{q}[B]{$q$}
\begin{tabular}{ccc}
\multirow{4}{*}{ \includegraphics[width=0.5\textwidth]{factorization.eps}} && \\[-1cm]
&\phantom{abcdef}& \includegraphics[height=0.077\textwidth]{hardloop.eps}\\[0.3cm]
&&\includegraphics[height=0.11\textwidth]{jetloop.eps}\\[0.3cm]
&& \includegraphics[height=0.11\textwidth]{softfun.eps}
\end{tabular}
\end{center}
\caption{Left: Factorization of the scattering amplitude near the partonic threshold. Right: Examples of NLO corrections to the hard, jet and soft function (from top to bottom). The thick blue lines denote partons collinear to the directions of the jet or the incoming hadrons. Soft emissions are pictured by thin red gluon lines.
\label{fig:factheorem}}
\end{figure}
The result from SCET is that the partonic cross section in the threshold region for any particular
channel has the form
\begin{equation} \label{fform}
\hat{s} \frac{\mathrm{d}^2\hat{\sigma} }{\mathrm{d}\hat{u}\, \mathrm{d}\hat{t}} = \hat{\sigma}^{(0)}( \hat{u},\hat{t})\,
H (\hat{u},\hat{t},M_V,\mu)
\int\! \mathrm{d} k\, J (m_X^2-2 E_J k) S(k,\mu)\, ,
\end{equation}
where the partonic Mandelstam variables are $\hat{s} = (p_1+p_2)^2$, $\hat{t} = (p_1-q)^2$ and $\hat{u} = (p_2-q)^2$, with $q$ the vector boson momentum,
with $q^2=M_V^2$.
We have factored out the Born level cross section $ \hat{\sigma}^{(0)}(\hat{u},\hat{t})$. The hadronic cross section is obtained after convoluting with PDFs and summing
over all partonic channels (see Sec. \ref{sec:intvars} below).
The factorization theorem in Eq.~\eqref{fform} is depicted in Figure~\ref{fig:factheorem}. The hard function $H$ contains the virtual corrections to the underlying hard-scattering process. There are two channels relevant for vector boson production, the Compton ($q g \to V q$) and annihilation ($q \bar{q} \to V g$) channels, and the corresponding hard functions are related by crossing symmetry. A sample NLO contribution to the hard function in the annihilation channel is the top one-loop diagram on the right-hand side of Figure~\ref{fig:factheorem}. For the photon case, the one-loop hard function was given in~\cite{Becher:2009th}, and in~\cite{Becher:2011fc} it was outlined how the hard function can be obtained for $M_V \ne 0$. For completeness, we list the one-loop result for both the Compton and annihilation channel in the Appendix. The jet function $J$ encodes the collinear emissions inside the final-state jet, while collinear emissions along the initial-state partons are absorbed into the PDFs. The jet function is obtained from the imaginary part of the two-point function of collinear fields (see the middle Feynman diagram on the right in Figure~\ref{fig:factheorem}). The two-loop results for the inclusive quark and gluon jet functions relevant here were obtained in~\cite{Becher:2006qw} and~\cite{Becher:2010pd}. The last Feynman diagram in the figure shows an NLO correction to the soft function, which describes the soft emissions from the energetic partons in both the initial and final state, which are encoded in Wilson lines along the corresponding directions. The corresponding soft function was recently computed to two loops in~\cite{Becher:2012za}.
In the remainder of this section, we give the resummed result for the cross section and discuss its numerical implementation. We first set up the integration over the parton momentum fractions in a form suited for threshold resummation and then give the resummed result, as well as the matching to fixed-order perturbation theory. Finally, we discuss how subtractions can be used to improve the convergence of the numerical integrations.
\subsection{Integration variables\label{sec:intvars}}
The perturbative calculation, whether NLO or including resummation, produces partonic cross sections. The observable boson $p_T$ spectrum is then obtained after convoluting with PDFs,
\begin{equation}
\frac{\mathrm{d}^2 \sigma}{\mathrm{d} y\, \mathrm{d} p_T}
= \sum_{ab}
\int_0^1 \mathrm{d} x_1 \int_0^1 \mathrm{d} x_2
f_{a/N_1} (x_1, \mu) f_{b/N_2} (x_2, \mu)
\frac{\mathrm{d}^2 \hat \sigma_{ab}}{\mathrm{d} y \,\mathrm{d} p_T} \,,
\end{equation}
where the sum is over all partonic channels, $a,b \in \{q,\bar{q}, g\}$. The partonic cross section can also be written as
\begin{equation}
\frac{\mathrm{d}^2 \hat \sigma_{ab}}{\mathrm{d} y \mathrm{d} p_T^2} =\hat s\,\frac{\mathrm{d}^2 \hat \sigma_{ab}}{\mathrm{d} \hat t \mathrm{d} \hat u}\,.
\end{equation}
At NLO, in a given channel, it has the general form
\begin{equation} \label{nloform}
\hat{s}\frac{\mathrm{d}^2 \hat \sigma}{\mathrm{d} \hat t\, \mathrm{d} \hat u} = \hat{\sigma}^{(0)} \left\{ \delta(m_X^2) + \alpha_s(\mu)\left[ \delta(m_X^2)\, h^{(1)} + \left[ \frac{1}{m_X^2} \right]_\star^{[\mu]} h^{(2)}
+ \left[\frac{\ln\frac{m_X^2}{\mu^2}}{m_X^2} \right]_\star^{[\mu]} h^{(3)} + h^{(4)} \right]\right\}
\, ,
\end{equation}
where $\hat{\sigma}^{(0)} $ and the coefficients $h^{(i)}$ are functions of the two variables $\hat t$ and $\hat u$. Because of the relation $\hat s+ \hat t+ \hat u = m_X^2 +M_V^2$, the $\delta$-function parts, effectively, only depend on a single variable. The $\star$-distributions are generalizations of the usual $+$-distributions to dimensionful variables~\cite{DeFazio:1999sv}. An N$^n$LO computation
would give distributions with logarithms up to $\ln^{2n-1}(m_X/\mu)$ in the numerator. Resummation allows one to predict these singular terms at higher orders, but not the regular parts, such as $h^{(4)}$.
The leading-order cross sections for the production of a photon are
\begin{align}\label{born}
\hat\sigma^{(0)} _{ {q \bar{q}} } &= \frac{2\, C_F\,\pi\, \alpha_{
\rm e.m.} \alpha_s(\mu)}{N_c \hat{s}} \, e_q^2\, T_0(\hat{u},\hat{t}),
& \hat\sigma^{(0)} _{qg}&= -\frac{\pi\, \alpha_{
\rm e.m.} \alpha_s(\mu)\,}{N_c \hat{s}}\, e_q^2\, T_0(\hat{s},\hat{t}) \, ,
\end{align}
where $e_q$ is the charge of the quark and
\begin{equation}
T_0(u,t) = \frac{u}{t}+\frac{t}{u}+\frac{2 M_V^2\, (M_V^2-t-u)}{t u}\,.
\end{equation}
For the photon, $M_V^2=q^2=0$, but we need the same expression also for $Z$ and $W$ bosons. The cross section $\hat\sigma^{(0)}_{gq}$ is obtained by replacing $T_0(\hat s,\hat t) \to T_0(\hat s,\hat u) $ in the expression for $\hat\sigma^{(0)} _{qg}$. To obtain the cross section for $Z$ production, one replaces the quark charge in (\ref{born}) by
\begin{equation}
e_q^2 \to\frac{|g_L^q|^2+|g_R^q|^2}{2}
= \frac{\big( 1 - 2|e_q|\sin^2\theta_W \big)^2 + 4 e_q^2\sin^4\theta_W}%
{8\sin^2\theta_W\cos^2\theta_W} \,,
\end{equation}
where $\theta_W$ is the weak mixing angle. Since the $W$ bosons have flavor-changing couplings, the sum over flavors must be replaced by a double sum over individual quark and antiquark flavors, $q$ and $q'$. Only left-handed currents appear in this case. The relevant coupling for a $W^-$ boson produced in the annihilation of an anti-up and a down quark is
\begin{equation}
\frac{|g_L^{q'q}|^2}{2}
= \frac{|V_{q'q}|^2}{4\sin^2\theta_W} \,,
\end{equation}
where $V_{q'q}$ are elements of the quark mixing matrix.
Because of the singular behavior of the partonic cross section at threshold, it is advantageous to introduce $m_X^2$ as an integration variable. Following~\cite{Ellis:1981hk}, we perform the integrations in the form
\begin{equation}
\frac{\mathrm{d}^2 \sigma}{\mathrm{d} y \mathrm{d} p_T}
= \sum_{ab}
\int_{x_{\rm min}}^1 \frac{\mathrm{d} x_1 }{x_1 s+u- M_V^2 }\int_0^{m_{\rm max}^2} \mathrm{d} m_X^2
f_{a/N_1} (x_1, \mu) \, f_{b/N_2} (x_2, \mu) \frac{\mathrm{d}^2 \hat\sigma_{ab}}{\mathrm{d} y \mathrm{d} p_T} \, , \label{newvar}
\end{equation}
with
\begin{align}
u & =(P_2-q)^2 = M_V^2-\sqrt{s}\sqrt{M_V^2+p_T^2}e^y \, , \nonumber\\
m_{\rm max}^2 &=u + x_1 (M_X^2- u) \, , \\
x_{\rm min} &=\frac{-u}{M_X^2-u}\,.\nonumber
\end{align}
When performing the resummation, one performs an expansion of the cross section around the partonic threshold $m_X = 0$. With the choice of variables adopted in Eq.~(\ref{newvar}), the expansion is performed at fixed $x_1$ or, equivalently, at fixed $\hat t = (p_1-q)^2$. This is problematic, since the expansion then induces unphysical rapidity asymmetries. In order to avoid this and obtain a symmetric form, we integrate twice: first with Eq.~(\ref{newvar}) in the variables $x_1$ and $m_X$ and then with the $ u \leftrightarrow t$ crossed version of Eq.~(\ref{newvar}) in the variables $x_2$ and $m_X$. By taking the average of these two results, we obtain a symmetric form of the expansion around the threshold. In the case of direct photon production, a more convenient choice of integration variables is
\begin{equation}
v = 1 + \frac{\hat{t}}{\hat{s}}
\, ,
\hspace{1em}
w = - \frac{\hat{u}}{\hat{s} + \hat{t}}\,.
\end{equation}
In this case, the partonic threshold is at $w=1$. Using these variables significantly improves the numerical integration. The resummed photon cross section in $v$ and $w$ was given in~\cite{Becher:2009th}.
\subsection{Resummation and matching to fixed order}
\begin{figure}[t!]
\begin{center}
\psfrag{m}[t]{}
\psfrag{h}{$\mu_h$}
\psfrag{j}{$\mu_j$}
\psfrag{s}{$\mu_s$}
\psfrag{f}{$\mu
$}
\psfrag{H}[]{$H_{I}(\hat{u},\hat{t})$}
\psfrag{J}[]{$J_{I}(m_X^2)$}
\psfrag{S}[]{$S_{I}(k)$}
\psfrag{F}[l]{$f_1(x_1)f_2(x_2)$}
\includegraphics[width=0.6\hsize]{running}
\end{center}
\vspace{-0.5cm}
\caption{Resummation by RG evolution.\label{running}}
\end{figure}
The resummed result for the cross section is obtained by solving the RG equations for the hard, jet and soft functions. Each function is then evaluated at its natural scale, where it does not suffer from large logarithmic corrections, and evolved to a common scale $\mu$, which we identify with the factorization scale, see Figure~\ref{running}. The solution of the RG equation for the hard function in the annihilation channel is
\begin{equation}
\hat\sigma^{(0)}_{ {q \bar{q}} }(\hat u,\hat t,\mu)\, H_{ {q \bar{q}} }(\hat u,\hat t,\mu) = U_{H_{ {q \bar{q}} }}(\mu_h,\mu) \, \hat\sigma_{ {q \bar{q}} }^{(0)}(\hat u,\hat t,\mu_h) H_{ {q \bar{q}} }(\hat u,\hat t,\mu_h) \,.
\end{equation}
The matching scale $\mu_h$ is chosen to be of the order of $p_T$ to avoid large logarithms. The NLO result for $H_{ {q \bar{q}} }(\hat u,\hat t,\mu_h)$ was given in~\cite{Becher:2011fc}.
We have included the Born cross section which depends on the scale via the coupling constant $\alpha_s(\mu)$. The evolution factor $U_{H_{ {q \bar{q}} }}(\mu_h,\mu)$ for the above combination takes the form
\begin{equation}
\ln U_{H_{ {q \bar{q}} }}(\mu_h,\mu) = 2 \left( C_F + \frac{C_A}{2} \right) \left[ 2 S(\mu_h,\mu) - A_{\rm cusp}(\mu_h,\mu) \ln\frac{\hat{s}}{\mu_h^2}\right] - 2 A_{{H_{ {q \bar{q}} }}}(\mu_h,\mu)\,,
\end{equation}
with
\begin{align}
S (\nu, \mu) &= - \int_{\alpha_s (\nu)}^{\alpha_s (\mu)} \mathrm{d} \alpha
\frac{\gamma_{\mathrm{cusp}} (\alpha)}{\beta (\alpha)} \int_{\alpha_s
(\nu)}^{\alpha} \frac{\mathrm{d} \alpha'}{\beta (\alpha')}\,, &
A_{\rm cusp} (\nu, \mu) &= - \int_{\alpha_s (\nu)}^{\alpha_s (\mu)} \mathrm{d} \alpha
\frac{\gamma_{\mathrm{cusp}} (\alpha)}{\beta (\alpha)}\,.
\end{align}
Explicit expressions for these functions in RG-improved perturbation theory can be found in~\cite{Becher:2006mr}. The function $A_{H_{ {q \bar{q}} }}$ is the same as $A_{\rm cusp} (\nu, \mu)$ with \begin{equation}
\gamma_{H_{ {q \bar{q}} }} = 2 \gamma_q+ \gamma_g - \frac{C_A}{2} \ln \frac{\hat{s}^2}{\hat{t} \hat{u}}\, \gamma_{\rm cusp}
\end{equation}
replacing
$\gamma_{\mathrm{cusp}}$.
The quark and gluon anomalous dimensions $\gamma_q$ and $\gamma_g$ are given to three-loop order in~\cite{Becher:2009qa}. The evolution factor $U_{H_{qg}}$ can be obtained from the above results using the crossing relation $\hat{s} \leftrightarrow -\hat{u}$ at fixed $\hat t$, and $U_{H_{gq}}$ follows from $U_{H_{qg}}$ using $\hat{t} \leftrightarrow \hat{u}$. The resummed results for the jet and soft functions can be obtained by solving their RG equations in Laplace space~\cite{Becher:2006nr}. For the gluon jet function, for example, the result takes the form
\begin{align} \label{eq:RGjet}
J_g (p^2, \mu) &= U_{J_g}(\mu_j,\mu) \,\widetilde{j}_g (\partial_{\color{darkblue} \eta_{j_g}}) \frac{1}{p^2} \left(
\frac{p^2}{\mu_j^2} \right)^{\color{darkblue} \eta_{j_g}} \frac{e^{- \gamma_E {\color{darkblue} \eta_{j_g}}}}{\Gamma ({\color{darkblue} \eta_{j_g}})}\,,
\end{align}
where $\widetilde{j}_g$ is the Laplace transform of the momentum-space jet function and
\begin{align}
U_{J_g}(\mu_j,\mu) &= \exp [- 4 C_A S (\mu_j, \mu) + 2 A_{J_g} (\mu_j, \mu)] \,, \\
{\color{darkblue} \eta_{j_g}} &= 2 C_A A_{\rm cusp} (\mu_j, \mu) \nonumber \, .
\end{align}
The corresponding results for the quark jet function and the soft functions in the different channels are all listed in~\cite{Becher:2009th}, together with the necessary anomalous dimensions. Inserting the resummed expressions into (\ref{fform}), one obtains
\begin{align}
\frac{\mathrm{d}^2 \hat\sigma_ {q \bar{q}} }{\mathrm{d} y \mathrm{d} p_T^2} &= \hat\sigma^{(0)}_{ {q \bar{q}} }(\hat u,\hat t,\mu_h)\,H_{ {q \bar{q}} }(\hat u,\hat t,\mu_h) \, U_{H_{ {q \bar{q}} }}(\mu_h,\mu
)\, U_{J_g}(\mu_j,\mu
)\, U_{S_{ {q \bar{q}} }}(\mu_s,\mu
) \nonumber \\
&
\phantom{==} \times \widetilde{j}_g (\partial_{\color{darkblue} \eta_{j}}, \mu_j)\, \frac{1}{m_X^2} \left( \frac{m_X^2}{\mu_j^2} \right)^{\color{darkblue} \eta_j} \, \widetilde{s}_{ {q \bar{q}} } ( \partial_{\color{darkblue} \eta_{s}},
\mu_s)\, \left( \frac{m_X^2}{ E_h \mu_s} \right)^{\color{darkblue} \eta_s}\,
\frac{e^{- \gamma_E \left({\color{darkblue} \eta_{j}}+ {\color{darkblue} \eta_{s}}\right) }}{
\Gamma \left({\color{darkblue} \eta_{j}}+ {\color{darkblue} \eta_{s}}\right) } \, , \nonumber\\
&= \hat\sigma^{(0)}_{ {q \bar{q}} }(\hat u,\hat t,\mu_h)\,H_{ {q \bar{q}} }(\hat u,\hat t,\mu_h) \, U_{H_{ {q \bar{q}} }}(\mu_h,\mu
)\, U_{J_g}(\mu_j,\mu
)\, U_{S_{ {q \bar{q}} }}(\mu_s,\mu
) \nonumber \\
&
\phantom{==} \times
\left( \frac{\mu_j^2}{E_h\mu_s} \right)^{\color{darkblue} \eta_s}
\widetilde{j}_g (\partial_{\color{darkblue} \eta_{ {q \bar{q}} }}, \mu_j)\, \widetilde{s}_{ {q \bar{q}} } \Big( \partial_{\color{darkblue} \eta_{ {q \bar{q}} }}+\ln\frac{\mu_j^2}{E_h \mu_s},\mu_s\Big)\, \frac{1}{m_X^2} \left( \frac{m_X^2}{\mu_j^2} \right)^{\color{darkblue} \eta_ {q \bar{q}} }
\frac{e^{- \gamma_E {\color{darkblue} \eta_{ {q \bar{q}} }}}}{\Gamma \left({\color{darkblue} \eta_{ {q \bar{q}} }}\right) } \, ,\label{resultsigma}
\end{align}
where $E_h = \sqrt{{\hat{t}\hat{u}}/{\hat{s}}} = p_T$ and
\begin{equation}
{\color{darkblue} \eta_{ {q \bar{q}} }} ={\color{darkblue} \eta_{j_g}} +{\color{darkblue} \eta_{s_{ {q \bar{q}} }}}
= 2 C_A A_{\rm cusp} (\mu_j, \mu) + (4 C_F - 2 C_A)
A_{\rm cusp} (\mu_s, \mu)\,. \nonumber
\end{equation}
To arrive at this expression, the convolution integral in (\ref{fform}) was explicitly carried out, which is possible because of the simple form of the RG evolved soft and jet functions.
Using the general expression Eq.~\eqref{resultsigma} for the resummed cross section, we can now explicitly compute the resummed distribution. We include almost all of the ingredients for N${}^3$LL accuracy. For N${}^3$LL, one needs the cusp anomalous dimension to four-loop order and the regular anomalous dimensions to three loops, together with the two-loop results for the hard, jet and soft functions. For the anomalous dimensions, the only missing ingredient is the unknown four-loop cusp anomalous dimension, which we estimate using the standard Pad\'e approximation as $\Gamma_4 = \Gamma_3^2/ \Gamma_2$. The remaining ingredients, which we do not include, are the non-logarithmic pieces of the jet, soft, and hard functions. The full two-loop jet functions are known~\cite{Becher:2006qw,Becher:2010pd} and also the two-loop soft function has now been computed~\cite{Becher:2012za}. The non-logarithmic piece of the two-loop hard function can be extracted from the results of~\cite{Garland:2002ak,Gehrmann:2011ab}, and we plan to include the full two-loop matching in the future. To indicate that we only have a partial result, we denote our highest order by N${}^3$LL$_{\rm p}$.
In order to include all the available perturbative information, we match to the NLO fixed-order result, which is the highest order available. To perform the matching, we use~\cite{Becher:2007ty}
\begin{equation}\label{match}
\left(\frac{\mathrm{d}^2\sigma}{\mathrm{d} y \mathrm{d} p_T^2}\right)^{\mathrm{N^3LL_{p} + NLO}}
= \left(\frac{\mathrm{d}^2\sigma}{\mathrm{d} y \mathrm{d} p_T^2}\right)^{\mathrm{N^3LL_{p}}} + \left(\frac{\mathrm{d}^2\sigma}{\mathrm{d} y \mathrm{d} p_T^2}\right)^{\mathrm{NLO}} -
\left(\frac{\mathrm{d}^2\sigma}{\mathrm{d} y \mathrm{d} p_T^2}\right)^{\mathrm{NNLL}}_{\mu_h=\mu_j=\mu_s=\mu
}\,.
\end{equation}
The subscript on the last term indicates that all
scales must be set equal to the relevant value of $\mu
$. Setting these scales equal switches off the resummation. The NNLL expression includes the one-loop corrections to the hard, jet and soft functions. Once it is evaluated with all scales equal, it reduces to the singular threshold terms of the NLO result, which must be subtracted since they are already included in the resummed result.
\subsection{Subtractions}
To compare with data, one needs to perform 4-dimensional integrals, over $x_1$, $m_X$, $y$ and $p_T$. These numerical integrals
are computationally expensive and additionally challenging because of the singular nature of the partonic cross sections. After resummation the partonic cross sections are no longer distribution valued at the partonic threshold, but behave as
\begin{equation}
\frac{1}{m_X^2} \left( \frac{m_X^2}{\mu_j^2} \right)^{\color{darkblue} \eta}
\end{equation}
near the threshold, see (\ref{resultsigma}). For the natural hierarchy $\mu_j\geq \mu_s \geq \mu $ the quantity ${\blue \eta}$ is larger than zero and the integral over $m_X$ converges. To see this, one rewrites ${\color{darkblue} \eta_{ {q \bar{q}} }}$ in the form
\begin{equation}
{\color{darkblue} \eta_{ {q \bar{q}} }} = 2 C_A \,A_{\rm cusp}\, (\mu_j, \mu_s) + 4 C_F \,A_{\rm cusp} (\mu_s, \mu)\, .
\end{equation}
In practice, however, the scale hierarchy is not very large, so that convergence can be quite slow. Furthermore, we will choose a high value of the factorization scale $\mu$, in which case the integral is no longer guaranteed to exist since ${\color{darkblue} \eta_{ {q \bar{q}} }}$ can become negative.
For some threshold variable $m$, the integral we need to evaluate has the form
\begin{equation}
I (M) = \int_0^M \mathrm{d} m\, m^{2{\blue \eta} - 1} f (m)\,.
\end{equation}
To analytically continue the result to negative ${\blue \eta}$ values, or to improve convergence, it is useful to perform subtractions~\cite{Becher:2007ty,Becher:2006mr}. A single subtraction would use
\begin{align}
I (M) &= \int_0^M \mathrm{d} m \,m^{2{\blue \eta} - 1} \Big\{ f (0) + \left[ f (m) - f (0)\right] \Big\}
\\
&= \int_0^M \mathrm{d} m \left\{ \frac{1}{2{\blue \eta}} M^{2{\blue \eta} - 1} f (0) + m^{2{\blue \eta} - 1}
\left[ f (m) - f (0) \right] \right\}\,,
\end{align}
where the difference $f(m)-f(0)$ makes the integral more convergent. Indeed, assuming $f(m)$ is smooth,
the integral will now converge for ${\blue \eta}>-1/2$. A second subtraction would give
\begin{equation}
I (M) = \int_0^M \mathrm{d} m \left\{ \frac{1}{2{\blue \eta}} M^{2{\blue \eta} - 1} f (0) +
\frac{1}{2{\blue \eta} + 1} M^{2{\blue \eta}} f' (0) + m^{2{\blue \eta} - 1} \left[ f (m) - f (0) -
m f' (0) \right] \right\}\,,
\end{equation}
which makes the integral converge for ${\blue \eta} > -1$. Analogously, one can perform higher-order subtractions to make the integral more and more convergent. In practice, performing too many subtractions slows the code down because the expressions become lengthy and the subtraction itself can become numerically unstable.
Generally, we have found that using two or three subtractions gives stable results for the N${}^3$LL resummed integrand. Note that higher subtractions involve derivatives of PDFs, which we compute by first interpolating the PDFs.
\section{Scale setting}
The resummed distribution obtained using SCET involves four matching scales: the hard scale $\mu_h$, the jet scale $\mu_j$, the soft scale $\mu_s$ and the factorization scale $\mu
$. Each ingredient of the factorization formula, thus, can be evaluated at its appropriate scale. This is in contrast to the fixed-order calculation which involves only a single scale, the factorization scale $\mu
$. In addition to the factorization scale, a renormalization scale $\mu_r$ is often introduced by hand in fixed-order computations, by expressing the coupling constant as $\alpha_s(\mu_r)$. Introducing a second scale $\mu_r$ may be useful as a tool to estimate uncertainties, but there is no physical justification for having $\mu \ne \mu_r$ when
working at fixed order. In contrast, the additional scales in SCET correspond to different physical regions.
If one chooses all the scales equal in the SCET calculation, the resummation is switched off.
Then only a single RG scale remains.
In this limit, SCET generates all of the terms that are singular in the threshold limit at NLO, as shown in Eq.~\eqref{nloform}. Since we include all logarithmic pieces in the two-loop hard, jet and soft functions, we also obtain all singular terms at NNLO, except for the coefficient of $\delta(m_X^2)$. In general, the singular terms amount to a large fraction of the full perturbative correction. For electroweak boson production at large transverse momentum, they amount to 70--80\% of the NLO correction~\cite{Becher:2011fc}. For inclusive Drell-Yan and Higgs production, also the NNLO correction is known, and it is found that a similarly large fraction of the perturbative corrections arises from the partonic threshold region~\cite{Becher:2007ty,Ahrens:2008nc}.
A perpetual frustration with fixed-order calculations is that they provide no insight into
how to choose the factorization scale in problems that involve physics at several scales. For example, for $W$ production, either $\mu = M_W$ or $\mu=\sqrt{M_W^2 + p_T^2}$ might seem natural. For large $p_T$, the difference in the prediction between these different parametrizations is larger than the scale variation within any particular parametrization. See for example Figure~\ref{fig:nlobands}, to be discussed more below. If higher-order calculations were available, as they might be soon for direct photon or $W$ production, the scale variation and parametrization
dependence would weaken. Unfortunately, however, there are only a handful of observables which have ever been computed beyond NLO. For more complicated processes, such as $W$+4 jet production, NNLO is a distant hope, and in this case there are many possible natural parameterizations. Thus the choice of parametrization for the factorization scale can amount to a significant and difficult-to-estimate source of uncertainty for a fixed-order computation.
An extremely satisfying feature of the effective field theory approach is that it {\it does} indicate
what the appropriate parametrization should be. For some observables, such as
$e^+e^-$ event shapes~\cite{Schwartz:2007ib,Becher:2008cf,Chien:2010kc,Feige:2012vc}, the scales are manifest in the resummed distribution. In hadronic collisions, one needs a somewhat more sophisticated procedure since the scales can depend on the functional form of the non-perturbative PDFs. A method for determining these scales without any arbitrary input of what is natural and what is not was proposed in~\cite{Becher:2007ty} and applied to direct photon and $W$ production in~\cite{Becher:2009th,Becher:2011fc}.
The idea is very simple: one supplements the leading-order calculation with just one part of
the SCET calculation at a time, for example, the hard, jet or soft function evaluated
at NLO in fixed-order perturbation theory. Doing this, there should be a single scale $\langle p \rangle$ which is the average value of momentum associated with these degrees of freedom, appearing in the large logarithms. After integrating over the PDFs, the perturbative correction will then have the form
\begin{equation}\label{logs}
\frac{\Delta\sigma}{\sigma^{\rm LO}} = \alpha_s(\mu)(c_2 L^2 + c_1 L + c_0)\,,
\end{equation}
with $L=\ln\frac{\mu}{\langle p\rangle}$. If $\mu$ is chosen either much lower or much higher than $\langle p \rangle$, the perturbative corrections will become large. Since we do not have an analytic expression for the distribution, due to the necessity of convoluting
with PDFs, we determine $\langle p \rangle$ numerically by computing the individual corrections to the cross section as a function of $\mu$. The result is shown in the right plot of Figure~\ref{fig:scalefig}. It has the expected form (\ref{logs}) and we see that while the jet and soft scales are concave upwards, the hard curve is concave downward. The extrema of the corresponding curves indicate the scales $\langle p \rangle$ that dominate these contributions after integrating over the PDF. It is then natural to define our default values for $\mu$ as the positions of the extrema. That there are different extrema for the different components proves that multiple scales are relevant. These scales are conflated in the fixed-order calculation.
The left plot in Figure~\ref{fig:scalefig} shows the fixed-order scale dependence. In this case, there is monotonic $\mu$ dependence, with no natural extremum.
\begin{figure}[t!]
\begin{center}\hspace{-0.5cm}
\includegraphics[height=0.3\textwidth]{nlobandsplot.eps}
\hspace{0.2cm}
\includegraphics[height=0.3\textwidth]{hjsbandsplot.eps}
\end{center}
\vspace{-0.5cm}
\caption{Scale sensitivities. These plots show the effect of adding part of the fixed-order NLO calculation to the LO calculation. The left panel shows what happens if all the $\mu$-dependent terms at NLO are added together. There is a slow monotonic logarithmic $\mu$ dependence, with no natural extremum. In contrast, when the hard, jet, or soft contributions are added separately, there are natural extrema. These extrema indicate the average value of momenta $\langle p \rangle$ appearing in the logarithms. That there are different extrema for the different components proves that multiple scales are relevant. The plots are for $W^+$ bosons, but the qualitative features are the same for all bosons. \label{fig:scalefig}}
\end{figure}
\begin{figure}[t]
\begin{center}
\hspace{-0.3cm}
\includegraphics[width=0.49\textwidth]{hardscalemins.eps}
\includegraphics[width=0.49\textwidth]{jetscalemins.eps}
\end{center}
\vspace{-0.4cm}
\caption{Natural hard and jet scales. These scales are determined, after integrating
over the PDFs, as typical energies appearing in the logarithms when the hard or jet functions at
next-to-leading order only are included. Dots show extrema for various values of boson masses,
and lines show our approximations. An important qualitative point is that the jet scale is naturally lower than the hard scale. This is an output from our numerical procedure, not an input from a formal
analysis.}
\label{fig:scalesnlo}
\end{figure}
To find the scales numerically we extract these extrema from the curves. Using a number of different
machine center-of-mass energies (we tried 2, 7, 14, and 100 TeV), $pp$ and $\bar{p} p$
collisions, and various boson masses, we determine a reasonable approximation to these points is given by
the following functional forms
\pagebreak
\begin{align} \label{scalevalues}
\mu_h &= \frac{13 p_T+ 2 M_V}{12} -\frac{p_T^2}{\sqrt{s}}\, ,\\
\mu_j &= \frac{7 p_T + 2 M_V }{12} \left (1-\frac{2p_T}{\sqrt{s}}\right) \, .
\end{align}
A comparison of this fit to the extrema for the hard and jet scales is shown in Figure~\ref{fig:scalesnlo}.
We have constrained the jet scale to vanish at the endpoint $p_T =\sqrt{s}/2$ since at that point
there is no phase space for collinear emission and the recoiling jet must be massless.
An alternative and slightly simpler hard scale choice that is quantitatively
equivalent for LHC energies is
\begin{align}
\mu_h = \frac{7 p_T+ M_V}{6} \,.
\end{align}
In the comparison to data, we use the scales in Eq.~\eqref{scalevalues}, for consistency with~\cite{Becher:2011fc}, which used these scales in comparison to Tevatron data from run II.
\begin{figure}
\begin{center}
\hspace{-0.5cm}
\includegraphics[width=0.49\textwidth]{hardscale.eps}\hspace{0.2cm}
\includegraphics[width=0.49\textwidth]{jetscale.eps}
\end{center}
\vspace{-0.4cm}
\caption{Comparison of our scale choices (blue) with the traditional choice $\mu = \sqrt{M_W^2 + p_T^2}$ (red)
for photon (dashed) and $W$ boson (solid). For the hard scale, there is not much difference.
On the other hand, the natural jet (and soft) scales are lower than the traditional choice, but
higher than the fixed scale $\mu=M_W$.
}
\label{fig:scalesMt}
\end{figure}
From Figures~\ref{fig:scalefig} or \ref{fig:scalesnlo}, it is obvious by eye that the natural jet scale is lower than the natural hard scale. While the hard scale is actually fairly close to a common scale choice $\mu = \sqrt{M_V^2+ p_T^2}$, as shown on the left of Figure~\ref{fig:scalesMt}, the jet scale is significantly lower. Both scales are higher than the fixed scale $\mu = M_V$ which was used in comparison to {\sc mcfm} in the {\sc atlas} study of the $W$ spectrum~\cite{Aad:2011fp}.
To further emphasize the importance of scale choices, we show in Figure~\ref{fig:nlobands} the relative difference in the NLO prediction from the different parameterizations for the $W$ spectrum. The band
corresponds to a region $\frac{1}{2} \mu(p_T) < \mu < 2 \mu(p_T)$ where $\mu(p_T)$ is either
$M_W$, our hard scale, or the popular choice $\sqrt{M_W^2 + p_T^2}$. For $p_T \sim M_W$, all scales give
comparable results while for large $p_T$, the fixed scale gives a prediction significantly higher than either of the other two parameterizations. If we used the jet scale instead of the hard scale, the band would be closer to the $\mu=M_W$ band. Thus it is important to choose the appropriate scale in the appropriate place to get an accurate prediction.
\begin{figure}
\begin{center}
\includegraphics[width=0.7\textwidth]{scalefig.eps}
\end{center}
\vspace{-0.4cm}
\caption{Scale variations at next-to-leading order. The blue southeast stripes show the scale variation
of the NLO calculation (called NNLO in {\sc fewz}) with $\mu=\mu_f=\mu_r=M_W$, as in the {\sc atlas} paper. The
red northeast stripes show the prediction using $\mu_f=\mu_r=\sqrt{M_W^2 + p_T^2}$ and the
black vertical stripes have $\mu_f$ and $\mu_r$ set to the scales in Eq.~\eqref{scalevalues}. Bands correspond to varying
$\mu=\mu_f=\mu_r$ by factors of two from these default scales.}
\label{fig:nlobands}
\end{figure}
\begin{figure}
\begin{center}
\includegraphics[width=1.0\textwidth]{allscalesWp6}
\end{center}
\vspace{-0.4cm}
\caption{The cross section for $W^+$ production at the LHC 7 TeV for various orders in perturbation
theory, normalized relative to the next-to-leading fixed-order curves. In the top four panels the
resummed curves are {\it not} matched to fixed order, which shows how including just the
logarithms compares to the full result. Bands are obtained by varying the hard, jet, soft and factorization scales by factors of 2 around our default scales and taking the maxima within that variation
region. The fifth panel shows the factorization scale variation after matching. The sixth
panel is the uncertainty from adding the hard, jet, soft and factorization uncertainties in quadrature.
}
\label{fig:variations}
\end{figure}
Having determined the default values for the scales in Eq.~\eqref{scalevalues}, we can compute the resummed distribution. As discussed above, we include all ingredients for N${}^3$LL accuracy, except for the two-loop non-logarithmic terms in the hard, jet and soft functions. We match to NLO fixed order and denote our highest order resummed result by N${}^3$LL$_{\rm p}+$NLO, where the subscript ``p'' stands for partial.
Convergence in perturbation theory and the relative size of various scale variations are shown in Figure~\ref{fig:variations}. To generate the bands in this plot, we determine the maximum and minimum cross section obtained when varying each scale up and down by a factor of $2$ around its default value. In contrast to the fixed-order result, the scale dependence is not monotonic (cf. Figure~\ref{fig:scalefig}). To determine the maximum and minimum, we compute the cross section at $\frac{1}{2}$ and $2$ times the default scale as well as at the central value, fit a parabola to those three points and take the maximum and minimum along the parabola.
Curves in the first four panels of Figure~\ref{fig:variations} are not matched to fixed order. The relatively
large factorization scale uncertainty comes about because the $\mu
$ dependence is only canceled
in the resummed distribution near threshold. The full $\mu
$ dependence at NLO is removed once the theory is matched to the fixed-order distribution, as it is in the bottom two panels. The combined uncertainty that we use for our final error estimates is the quadratic sum of the hard, jet,
soft and factorization scale uncertainties:
\begin{equation}
\Delta \sigma = \sqrt{ (\Delta_h \sigma)^2 + (\Delta_j \sigma)^2+(\Delta_s \sigma)^2+(\Delta_f \sigma)^2}\,.
\end{equation}
This is a conservative error estimate. We observe that the N${}^2$LL and N${}^3$LL$_{\rm p}$ scale variation bands overlap, but the increase in the cross section from NLL to N${}^2$LL is larger than the NLL band. The same behavior is also seen when going from the LO to the NLO fixed-order result. The corresponding bands would overlap had we evaluated the LO and NLL results with leading-order PDF sets which have a larger value of $\alpha_s$, instead of the NNLO PDFs we use throughout. The increase in the cross section from NLL to N${}^2$LL is mostly due to the one-loop constants in the soft and hard functions, as can be seen from the right panel of Figure~\ref{fig:scalefig}. We have checked how much of a shift the known two-loop jet and soft function constants induce and find that it is below a per cent.
\section{Comparison with LHC data}
We are now ready to compare to LHC data. We discuss separately the two processes we study, direct photon and $W$ production. For numerical work we use the NNLO MSTW 2008 PDF set and its associated
$\alpha_s(M_Z) = 0.1171$~\cite{Martin:2009iq}. We also use $M_W = 80.399$ GeV, $\alpha_{\rm e.m.} = 127.916^{-1}$,
$\sin^2\theta_W = 0.2226$, $V_{ud} = 0.97425$, $V_{us} = 0.22543$, $V_{ub} = 0.00354$, $V_{cd} = 0.22529$,
$V_{cs} = 0.97342$ and $V_{cb} = 0.04128$.
\subsection{Direct photon}
For direct photon production, to be consistent with the comparison to Tevatron data in~\cite{Becher:2009th},
we use the scale choices from that paper
\begin{align}
\mu_h &= p_T \,, \nonumber \\
\mu_j & = \frac{p_T}{2}\left (1-2 \frac{p_T}{\sqrt{s}}\right) \,. \label{mujchoice}
\end{align}
These are slightly simpler than those in Eq.~\eqref{scalevalues}, but equivalent within the uncertainties.
The direct photon spectrum is complicated by the requirement of photon isolation which is necessary to
remove hadronic backgrounds, such as $\pi^0$ decays. The {\sc atlas} study~\cite{Aad:2011tw}
required the energy in a cone of $R=0.4$ around the photon to be less than $4\,{\rm GeV}$.
To include this effect rather than matching to the inclusive fixed-order NLO calculation, we match
to the NLO calculation with isolation and fragmentation contributions using the Monte Carlo program
{\sc jetphox}~\cite{Catani:2002ny}.
\begin{figure}
\begin{center}
\includegraphics[width=0.55\textwidth]{DPAtlas0.eps}
\includegraphics[width=0.55\textwidth]{DPAtlas1.eps}
\includegraphics[width=0.55\textwidth]{DPAtlas2.eps}
\end{center}
\vspace{-0.4cm}
\caption{Comparison of theory to {\sc atlas} data for direct photon in various rapidity regions.
The normalization is such that 0 corresponds to the inclusive direct photon
spectrum at NLO without isolation or fragmentation. The NLO cross sections including
fragmentation and isolation are slightly higher, especially at low $p_T$, since the enhancement
due to fragmentation outweighs the reduction due to isolation. Uncertainties on the NLO
correspond to factor of 2 variation of $\mu_f=\mu_r$ around its default value of $p_T$. For
the resummed curves, which also include isolation and fragmentation, uncertainties are hard, jet, soft and factorization variations by
factors of 2 added in quadrature. Uncertainties from fragmentation and isolation are {\it not} included,
and may be large.}
\label{fig:dpcomp}
\end{figure}
The {\sc atlas} data in~\cite{Aad:2011tw} includes 35 pb${}^{-1}$ of data separated into three rapidity
regions. The comparison of the theory prediction at NLO and at N${}^3$LL$_{\rm p}$+NLO order is shown in Figure~\ref{fig:dpcomp}. The theory and data are in agreement within uncertainties. It will
be interesting to update this comparison to a large data set, particularly if it includes
higher $p_T$.
\subsection{\boldmath $W$ boson}
The calculation of the $W$ boson $p_T$ spectrum at N${}^3$LL$_{\rm p}$+NLO order is significantly more challenging numerically than direct photon production, despite the identical factorization formulae. The extra scale, the boson mass, complicates the kinematics, which makes the integrals converge more slowly and the scale choices more complicated. Moreover, experimentally, since only the charged lepton from the $W$ decay is measured, acceptances
have to be included in comparing the inclusive $W$ spectrum to the measured distribution.
The acceptance cuts used by {\sc atlas} in~\cite{Aad:2011fp}
were
\begin{equation}
p_T^{l} > 20\, {\rm GeV}, \quad |\eta^{l}| < 2.4, \quad p_T^\nu > 25\, {\rm GeV}\,,
\end{equation}
and
\begin{equation}
m_T^W \equiv \sqrt{ ( |{\vec p_T}^l| +|{\vec p_T}^\nu|)^2 - |\vec{p_T}^l +\vec{p_T}^\nu|^2} \ge
40\, {\rm GeV}\,,
\end{equation}
where $l$ is an electron or muon.
\begin{figure}
\begin{center}
\includegraphics[width=0.65\textwidth]{AccPlot}
\end{center}
\vspace{-0.4cm}
\caption{Kinematic acceptances according to {\sc atlas} cuts. There is little difference in the acceptances for $W^+$ or $W^-$ bosons. However, there is about a 2\% increase in the efficiency when going from leading-order to next-to-leading order in perturbation theory (all computed with {\sc fewz}).
The NLO acceptances are used in our comparison with {\sc atlas} data.}
\label{fig:acceptances}
\end{figure}
To apply these cuts to our inclusive sample, we multiply our inclusive cross sections by acceptances,
given by the ratio of the inclusive $W$ cross section to the $W$ cross section in the fiducial volume as a function of $p_T$. These acceptances are shown in Figure~\ref{fig:acceptances}. We calculate them at LO and NLO for $W^+$ and $W^-$ using the program
{\sc fewz}~\cite{Gavin:2010az}. We find no significant difference between the leading order and next-to-leading order acceptances. For the numerical work, we use a smooth function fit to the inclusive NLO acceptances for $W^+ + W^-$, also shown in Figure~\ref{fig:acceptances}.
The total cross sections we find at the $\sqrt{s} = 7$ TeV LHC, using $\mu_f = \mu_r = M_W$ are
\begin{align}
\sigma( p p\to W^+ \to \mu^+ \nu)_{\mathrm{inc}} &= (6204\pm0.7)\, {\mathrm{pb}}\,, \qquad
\sigma( p p\to W^+\to \mu^+ \nu)_{\mathrm{fid}} = (3061 \pm 3.0)\, {\mathrm{pb}}\,, \nonumber\\
\sigma( p p\to W^-\to \mu^- \bar{\nu})_{\mathrm{inc}} &= (4326\pm0.5)\, {\mathrm{pb}}\,, \qquad
\sigma( p p\to W^-\to \mu^- \bar{\nu})_{\mathrm{fid}} = (2038\pm 1.9) \, {\mathrm{pb}}\,,
\end{align}
where ``inc'' refers to the inclusive cross section and ``fid'' to the fiducial cross section (with cuts). Errors
are integration errors from {\sc fewz}.
Dividing these by the branching ratio to muons, $BR(W\to \mu \nu) = 0.1083$, gives the total inclusive
cross section. To compare to data, we take our theoretical calculation of the inclusive
differential cross section, multiply by the acceptance curves and then
divide by the total cross section in the fiducial volume, $47.04\,{\rm nb}$. This lets us compare directly to the {\sc atlas} data, which is normalized to the total number of events in the fiducial volume.
\begin{figure}
\begin{center}
\includegraphics[width=0.8\textwidth]{AtlasWpWmClean}
\end{center}
\vspace{-0.4cm}
\caption{Comparison of theory to {\sc atlas} data for the $W$ spectra. The red band is the NLO prediction,
using $\mu_f = \mu_r = M_W$, as in~\cite{Aad:2011fp}. The N${}^3$LL${}_{\mathrm p}$ + NLO prediction, in green, is
in excellent agreement with the data. Dashed blue lines indicate PDF uncertainties which are of
order the scale uncertainties at N${}^3$LL${}_{\mathrm p}$ + NLO order.}
\label{fig:atlasWcompare}
\end{figure}
The comparison to the {\sc atlas} data is shown in Figure~\ref{fig:atlasWcompare}. The agreement is excellent. In this plot, results
are shown normalized to the NLO prediction with $\mu_f=\mu_r= M_W$. As we have argued in the previous section,
this is not a good scale choice in the large $p_T$ region.
We use $\mu=M_W$ as the basis for our comparison since it is the scale choice used by {\sc atlas} in~\cite{Aad:2011fp}, and, therefore, our NLO calculation can be directly compared to their calculation labeled
MCFM in Figure~7 of~\cite{Aad:2011fp}.
There are two important qualitative conclusions that can be drawn from Figure~\ref{fig:atlasWcompare}. The first
is that scale choices are important. Although the data is within the NLO uncertainty, comparing the NLO band in this
plot, which uses a fixed scale $\mu=M_W$ with those in Figure~\ref{fig:nlobands}, we see that the downward
trend in the data indicates a clear preference that $\mu$ should increase with $p_T$. However, as we have argued, there is no natural scale choice at fixed order, since multiple scales are present. Our procedure of determining these scales numerically is supported quantitatively by the
agreement between the N${}^3$LL$_{\rm p}$+NLO band and the data in this figure.
The second qualitative conclusion concerns the PDF uncertainties. These are shown as the blue dashed lines in Figure~\ref{fig:atlasWcompare}. The PDF uncertainties are smaller than the uncertainties on the data and the NLO
scale uncertainties, but of the same order as the scale uncertainties of the resummed distribution. This indicates that PDF fits could be improved using the $W$ spectra, but only if resummation is included (or perhaps if the NNLO result becomes available).
\section{Conclusions}
In this paper, we have compared theoretical predictions for the direct photon and $W$ boson spectra at high $p_T$ to measurements performed by the {\sc atlas} collaboration using LHC data. The predictions were
performed using the exact cross section at NLO in $\alpha_s$ (the highest order known),
supplemented with additional terms to all orders in $\alpha_s$ coming from a threshold expansion. These extra terms correspond to
large logarithms associated with infrared singularities of the recoiling jet. To isolate these terms,
the resummed calculation is performed near the partonic threshold, in which the $p_T$ of the vector boson
is maximal for the given value of the partonic center-of-mass energy. In this limit, the cross section factorizes and the logarithmic terms can then be included to all orders in perturbation theory. These terms usually give the dominant contribution to the cross section. In the photon case, the fragmentation cross section and isolation corrections were also added, using the program {\sc jetphox}.
A main advantage of the resummed cross section, which was calculated using effective theory in~\cite{Becher:2009th} and~\cite{Becher:2011fc}, is that it has well-defined scales associated with different phase space regions. Unlike a fixed-order calculation, which merges all the scales into one, the effective field theory allows one to choose the scales appropriate for the relevant regions. We employed a numerical procedure to determine which scales are appropriate. This removes a source of uncertainty from the fixed-order calculation, namely, which value of the scale $\mu$ should be adopted. Typical fixed-order calculations choose scales like $M_W$, $p_T$ or $\sqrt{p_T^2 + M_W^2}$. We find that a single such parametrization is insufficient: while the hard scale is naturally close to $\sqrt{p_T^2 + M_W^2}$, the jet and soft scales are naturally lower.
The results of our comparison with data are shown in Figures~\ref{fig:dpcomp} and~\ref{fig:atlasWcompare}. The photon case is complicated by the requirement of photon isolation, while the $W$ case is complicated by the missing energy and the necessary acceptance cuts on the lepton. We found good agreement for the direct photon case and excellent agreement for the case of the $W$ boson. In the $W$ boson case, one can clearly see the importance of proper scale choices. Moreover, the reduction of theoretical uncertainty when resummation is included is enough to make it comparable to the PDF uncertainty. These comparisons provide a convincing demonstration of the relevance of resummation for LHC physics.
In the future, it would be useful to compare our prediction to the direct photon data available from {\sc cms} \cite{Chatrchyan:2011ue}. It would also be interesting to compare to Drell-Yan spectra from intermediate $Z$ bosons at high $p_T$\cite{Chatrchyan:2011wt,Aad:2011gj} and to direct photon and $W$ data at higher luminosity. Improvements of the theoretical description could be achieved by including the full two-loop hard, jet and soft functions and the fixed-order NNLO calculation, once it becomes available. Furthermore, on top of the QCD effects, one should also include electroweak Sudakov logarithms~\cite{Kuhn:2004em,Hollik:2007sq}, which will have a noticeable effect at high $p_T$. In addition, we hope to eventually provide a publicly available code to produce the resummed results.
\section{Acknowledgments}
The authors would like to thank F.~Bucci, A.~Hamilton and G.~Marchiori for discussions of the direct photon process and C.~Mills and J.~Guimar\~aes da Costa for discussions about the {\sc atlas} $W$ measurement. We thank R.~Gonsalves and F.~Petriello for help with the programs {\sc q${}_T$} and {\sc fewz}, respectively, and X.~Tormo for comments on the manuscript. The work of TB and CL is supported by the Swiss National Science Foundation (SNF) under grant 200020-140978 and the Innovations- und Kooperationsprojekt C-13 of the Schweizerische Universit\"atskonferenz (SUK/CRUS). MDS is supported by the US Department of Energy under grant DE-SC003916.
\begin{appendix}
\section{One-loop hard function}
In this appendix we give the result for the hard function in both the annihilation and the Compton channel. The functions are related by crossing symmetry, but the analytic continuation from one channel to the other is not entirely trivial. The hard function is obtained from the result (A.9) in \cite{Arnold:1988dp} after performing renormalization.
For the annihilation channel, we have
\begin{align}
H_ {q \bar{q}} (u,t) &= 1 + \frac{\alpha_s}{4\pi} \left\{ C_A \frac{\pi^2}{6} + C_F \left(-16 + \frac{7\pi^2}{3} \right)
+ 2 C_A \ln^2\frac{s}{M_V^2} + C_A \ln^2\frac{M_V^2 - t}{M_V^2} \right.
\nonumber
\\
& \left. + C_A \ln^2\frac{M_V^2 - u}{M_V^2} + \ln\frac{s}{M_V^2} \left(-6 C_F - 2 C_A \ln\frac{s^2}{t u}\right)
- C_A \ln^2\frac{t u}{M_V^4} - 6 C_F \ln\frac{\mu^2}{s} \right.
\nonumber
\\
& \left. - 2 C_A \ln\frac{s^2}{t u} \ln\frac{\mu^2}{s}
+ \left(-C_A - 2 C_F\right) \ln^2\frac{\mu^2}{s} \right.
\nonumber
\\
& \left. + 2 C_A \Li2\left(\frac{M_V^2}{M_V^2 - t}\right)
+ 2 C_A \Li2\left(\frac{M_V^2}{M_V^2 - u}\right)
\right\}
\nonumber
\\
& + \frac{\alpha_s}{4\pi} \frac{2}{T_0(u,t)} \left\{
C_F \left( \frac{s}{s + t} + \frac{s + t}{u}+ \frac{s}{s + u} + \frac{s + u}{t} \right) \right.
\nonumber
\\
& \left.+ \left(-C_A + 2 C_F\right) \left[-\frac{M_V^2 \left( t^2 + u^2 \right)}{t u \left(t + u\right)}
+ 2 \left(\frac{s^2}{\left(t + u\right)^2} + \frac{2 s}{t + u}\right) \ln\frac{s}{M_V^2}\right] \right.
\nonumber
\\
& \left. + \left( C_A\frac{t}{s + u}
+ C_F\frac{4 s^2 + 2 s t + 4 s u + t u}{\left(s + u\right)^2} \right) \ln\frac{-t}{M_V^2} \right.
\nonumber
\\
& \left. + \left( C_A\frac{u}{s + t}
+ C_F \frac{4 s^2 + 4 s t + 2 s u + t u}{\left(s + t\right)^2} \right) \ln\frac{-u}{M_V^2} \right.
\nonumber
\\
& \left. - \left(-C_A + 2 C_F\right)
\left[ \frac{s^2 + \left(s + u\right)^2}{t u} \left(\frac{1}{2} \ln^2\frac{s}{M_V^2} - \frac{1}{2} \ln^2\frac{M_V^2 - t}{M_V^2}
+ \ln\frac{s}{M_V^2} \ln\frac{-t}{s-M_V^2 } \right.\right.\right.
\nonumber
\\
& \left.\left.\left. + \Li2\left(\frac{M_V^2}{s}\right)
- \Li2\left(\frac{M_V^2}{M_V^2 - t}\right) \right) \right.\right.
\nonumber
\\
& \left.\left. + \frac{s^2 + \left(s + t\right)^2}{t u} \left( \frac{1}{2} \ln^2\frac{s}{M_V^2} - \frac{1}{2} \ln^2\frac{M_V^2 - u}{M_V^2}
+ \ln\frac{s}{M_V^2} \ln\frac{-u}{s-M_V^2} \right.\right.\right.
\nonumber
\\
& \left.\left.\left. + \Li2\left(\frac{M_V^2}{s}\right)
- \Li2\left(\frac{M_V^2}{M_V^2 - u}\right)
\right)
\right]
\right\}\, ,
\end{align}
and the Compton channel result reads
\begin{align}
H_{qg}(u,t) &= 1 + \frac{\alpha_s}{4\pi} \left\{
C_A\frac{7 \pi^2}{6} + C_F \left(-16 + \frac{\pi^2}{3}\right) - 6 C_F \ln\frac{s}{M_V^2}
-C_A \ln^2\frac{-s t}{M_V^4} + C_A \ln^2\frac{M_V^2-t}{M_V^2} \right.
\nonumber
\\
& \left. + 2 C_A \ln\frac{\left(s-M_V^2 \right) t}{M_V^2 u} \ln\frac{-u}{M_V^2}
+ C_A \ln^2\frac{-u}{M_V^2} - 2 C_A \ln\frac{\left(M_V^2 - s\right) s t}{M_V^2 u^2} \ln\frac{-u}{s} \right.
\nonumber
\\
& \left. - 2 C_A \ln^2\frac{-u}{s} - 2 C_F \ln^2\frac{-u}{s}
+ \left(-6 C_F + 2 C_A \ln\frac{t}{u} + 4 C_F \ln\frac{-u}{s} \right) \ln\frac{\mu^2}{s} \right.
\nonumber
\\
& \left. - \left(C_A + 2 C_F\right) \ln^2\frac{\mu^2}{s}
- 2 C_A \Li2\left(\frac{M_V^2}{s}\right) + 2 C_A \Li2\left(\frac{M_V^2}{M_V^2 - t}\right)
\right\}
\nonumber
\\
& + \frac{\alpha_s}{4\pi} \frac{2}{T_0(s,t)} \left\{
C_F \left(\frac{u}{s + u} + \frac{s + u}{t} + \frac{u}{t + u} + \frac{t + u}{s} \right) \right.
\nonumber
\\
& \left. + \left( C_A \frac{s}{t + u} + C_F \frac{s t + 2 s u + 4 t u + 4 u^2}{\left(t + u\right)^2}
\right) \ln\frac{s}{M_V^2} \right.
\nonumber
\\
& \left. + \left(C_A \frac{t}{s + u} + C_F \frac{s t + 4 s u + 2 t u + 4 u^2}{\left(s + u\right)^2} \right) \ln\frac{-t}{M_V^2} \right.
\nonumber
\\
& \left. + \left(-C_A + 2 C_F\right) \left[-\frac{M_V^2 \left(s^2 + t^2\right)}{s t \left(s + t\right)}
+ 2 \left(\frac{2 u}{s + t} + \frac{u^2}{\left(s + t\right)^2}\right) \ln\frac{-u}{M_V^2} \right] \right.
\nonumber
\\
& \left. - \left(-C_A + 2 C_F\right) \left[ \frac{u^2 + \left(t + u\right)^2}{s t}
\left(\frac{1}{2} \ln^2\frac{s}{M_V^2}
- \frac{1}{2} \ln^2\frac{M_V^2 - u}{M_V^2} + \ln\frac{s}{M_V^2} \ln\frac{-u}{s-M_V^2}
\right.\right.\right.
\nonumber
\\
& \left.\left.\left. + \Li2\left(\frac{M_V^2}{s}\right) - \Li2\left(\frac{M_V^2}{M_V^2 - u}\right)
\right) \right.\right.
\nonumber
\\
& \left.\left. + \frac{u^2 + \left(s + u\right)^2}{s t} \left(-\frac{\pi^2}{2}
- \frac{1}{2} \ln^2\frac{M_V^2 - t}{M_V^2} - \frac{1}{2} \ln^2\frac{M_V^2 - u}{M_V^2}
+ \ln\frac{-t}{M_V^2} \ln\frac{-u}{M_V^2} \right.\right.\right.
\nonumber
\\
& \left.\left.\left. - \Li2\left(\frac{M_V^2}{M_V^2 - t}\right)
- \Li2\left(\frac{M_V^2}{M_V^2 - u}\right)
\right)
\right]
\right\}\,.
\end{align}
\end{appendix}
|
{
"timestamp": "2013-08-08T02:03:59",
"yymm": "1206",
"arxiv_id": "1206.6115",
"language": "en",
"url": "https://arxiv.org/abs/1206.6115"
}
|
\section{Acknowledgments}
We thank Carl Albright for many useful discussions that led to the development of LieART.
We also thank Tanja Feger for checking the tables against those found in \cite{Slansky}.
The work of RPF was supported by a fellowship within the Postdoc-Programme of the German
Academic Exchange Service (DAAD). The work of RPF and TWK was supported by US
DOE grant DE-FG05-85ER40226.
\section{Tables}
\setcounter{table}{0}
We present here tables of properties of irreps, such as Dynkin labels,
dimensional names, indices, congruency classes and the number of singlets in
various subalgebra branchings in Section \ref{ssec:IrrepProperties}, as well as
tables of tensor products in Section \ref{ssec:TensorProducts} and subalgebra
branching rules in Section \ref{ssec:BranchingRules} for many classical and all
exceptional Lie algebras. In presentation style, selection of irreps and
subalgebra branching we closely follow \cite{Slansky}, which has been the
definitive reference for unified model building since its publication. The
tables were created by the supplemental package \texttt{Tables.m}, which uses LieART
for the computation. The tables can also be found as Mathematica notebooks in the
LieART documentation integrated into the Mathematica documentation center as
``Representation Properties'', ``Tensor Products'' and ``Branching Rules'' under
the section ``Tables'' on the LieART documentation home. Since LieART comes with
the functions that generate the tables, the user may extend them to the limit of
his or her computer power.
\begin{table}[!h]
\begin{center}
\begin{tabular}{l|ll|ll|ll}
\toprule\rowcolor{tableheadcolor}
&\multicolumn{2}{>{\columncolor{tableheadcolor}}l|}{\textbf{Irrep Properties}} & \multicolumn{2}{>{\columncolor{tableheadcolor}}l|}{\textbf{Tensor Products}} & \multicolumn{2}{>{\columncolor{tableheadcolor}}l}{\textbf{Branching Rules}}\\
\rowcolor{tableheadcolor}\textbf{Algebra} & \textbf{Number} & \textbf{Page} & \textbf{Number} & \textbf{Page} & \textbf{Number} & \textbf{Page}\\
\midrule
\SU2 & \ref{tab:SU2Irreps} & \pageref{tab:SU2Irreps} & \ref{tab:SU2TensorProducts} & \pageref{tab:SU2TensorProducts} & \ref{tab:SU2BranchingRules} & \pageref{tab:SU2BranchingRules} \\
\SU3 & \ref{tab:SU3Irreps} & \pageref{tab:SU3Irreps} & \ref{tab:SU3TensorProducts} & \pageref{tab:SU3TensorProducts} & \ref{tab:SU3BranchingRules} & \pageref{tab:SU3BranchingRules} \\
\SU4 & \ref{tab:SU4Irreps} & \pageref{tab:SU4Irreps} & \ref{tab:SU4TensorProducts} & \pageref{tab:SU4TensorProducts} & \ref{tab:SU4BranchingRules} & \pageref{tab:SU4BranchingRules} \\
\SU5 & \ref{tab:SU5Irreps} & \pageref{tab:SU5Irreps} & \ref{tab:SU5TensorProducts} & \pageref{tab:SU5TensorProducts} & \ref{tab:SU5BranchingRules} & \pageref{tab:SU5BranchingRules} \\
\SU6 & \ref{tab:SU6Irreps} & \pageref{tab:SU6Irreps} & \ref{tab:SU6TensorProducts} & \pageref{tab:SU6TensorProducts} & \ref{tab:SU6BranchingRules} & \pageref{tab:SU6BranchingRules} \\
\SU7 & \ref{tab:SU7Irreps} & \pageref{tab:SU7Irreps} & \ref{tab:SU7TensorProducts} & \pageref{tab:SU7TensorProducts} & \ref{tab:SU7BranchingRules} & \pageref{tab:SU7BranchingRules} \\
\SU8 & \ref{tab:SU8Irreps} & \pageref{tab:SU8Irreps} & \ref{tab:SU8TensorProducts} & \pageref{tab:SU8TensorProducts} & \ref{tab:SU8BranchingRules} & \pageref{tab:SU8BranchingRules} \\
\SU9 & \ref{tab:SU9Irreps} & \pageref{tab:SU9Irreps} & \ref{tab:SU9TensorProducts} & \pageref{tab:SU9TensorProducts} & \ref{tab:SU9BranchingRules} & \pageref{tab:SU9BranchingRules} \\
\SU{10} & \ref{tab:SU10Irreps} & \pageref{tab:SU10Irreps} & \ref{tab:SU10TensorProducts} & \pageref{tab:SU10TensorProducts} & \ref{tab:SU10BranchingRules} & \pageref{tab:SU10BranchingRules}\\
\SU{11} & \ref{tab:SU11Irreps} & \pageref{tab:SU11Irreps} & \ref{tab:SU11TensorProducts} & \pageref{tab:SU11TensorProducts} & \ref{tab:SU11BranchingRules} & \pageref{tab:SU11BranchingRules}\\
\SU{12} & \ref{tab:SU12Irreps} & \pageref{tab:SU12Irreps} & \ref{tab:SU12TensorProducts} & \pageref{tab:SU12TensorProducts} & \ref{tab:SU12BranchingRules} & \pageref{tab:SU12BranchingRules}\\
\midrule
\SO7 & \ref{tab:SO7Irreps} & \pageref{tab:SO7Irreps} & \ref{tab:SO7TensorProducts} & \pageref{tab:SO7TensorProducts} & \ref{tab:SO7BranchingRules} & \pageref{tab:SO7BranchingRules} \\
\SO8 & \ref{tab:SO8Irreps} & \pageref{tab:SO8Irreps} & \ref{tab:SO8TensorProducts} & \pageref{tab:SO8TensorProducts} & \ref{tab:SO8BranchingRules} & \pageref{tab:SO8BranchingRules} \\
\SO9 & \ref{tab:SO9Irreps} & \pageref{tab:SO9Irreps} & \ref{tab:SO9TensorProducts} & \pageref{tab:SO9TensorProducts} & \ref{tab:SO9BranchingRules} & \pageref{tab:SO9BranchingRules} \\
\SO{10} & \ref{tab:SO10Irreps} & \pageref{tab:SO10Irreps} & \ref{tab:SO10TensorProducts} & \pageref{tab:SO10TensorProducts} & \ref{tab:SO10BranchingRules} & \pageref{tab:SO10BranchingRules}\\
\SO{11} & \ref{tab:SO11Irreps} & \pageref{tab:SO11Irreps} & \ref{tab:SO11TensorProducts} & \pageref{tab:SO11TensorProducts} & -- & -- \\
\SO{12} & \ref{tab:SO12Irreps} & \pageref{tab:SO12Irreps} & \ref{tab:SO12TensorProducts} & \pageref{tab:SO12TensorProducts} & -- & -- \\
\SO{13} & \ref{tab:SO13Irreps} & \pageref{tab:SO13Irreps} & \ref{tab:SO13TensorProducts} & \pageref{tab:SO13TensorProducts} & -- & -- \\
\SO{14} & \ref{tab:SO14Irreps} & \pageref{tab:SO14Irreps} & \ref{tab:SO14TensorProducts} & \pageref{tab:SO14TensorProducts} & \ref{tab:SO14BranchingRules} & \pageref{tab:SO14BranchingRules}\\
\SO{18} & \ref{tab:SO18Irreps} & \pageref{tab:SO18Irreps} & \ref{tab:SO18TensorProducts} & \pageref{tab:SO18TensorProducts} & \ref{tab:SO18BranchingRules} & \pageref{tab:SO18BranchingRules}\\
\SO{22} & \ref{tab:SO22Irreps} & \pageref{tab:SO22Irreps} & \ref{tab:SO22TensorProducts} & \pageref{tab:SO22TensorProducts} & \ref{tab:SO22BranchingRules} & \pageref{tab:SO22BranchingRules}\\
\SO{26} & \ref{tab:SO26Irreps} & \pageref{tab:SO26Irreps} & \ref{tab:SO26TensorProducts} & \pageref{tab:SO26TensorProducts} & \ref{tab:SO26BranchingRules} & \pageref{tab:SO26BranchingRules}\\
\midrule
\Sp4 & \ref{tab:Sp4Irreps} & \pageref{tab:Sp4Irreps} & \ref{tab:Sp4TensorProducts} & \pageref{tab:Sp4TensorProducts} & -- & -- \\
\Sp6 & \ref{tab:Sp6Irreps} & \pageref{tab:Sp6Irreps} & \ref{tab:Sp6TensorProducts} & \pageref{tab:Sp6TensorProducts} & -- & -- \\
\Sp8 & \ref{tab:Sp8Irreps} & \pageref{tab:Sp8Irreps} & \ref{tab:Sp8TensorProducts} & \pageref{tab:Sp8TensorProducts} & -- & -- \\
\Sp{10} & \ref{tab:Sp10Irreps} & \pageref{tab:Sp10Irreps} & \ref{tab:Sp10TensorProducts} & \pageref{tab:Sp10TensorProducts} & -- & -- \\
\Sp{12} & \ref{tab:Sp12Irreps} & \pageref{tab:Sp12Irreps} & \ref{tab:Sp12TensorProducts} & \pageref{tab:Sp12TensorProducts} & -- & -- \\
\midrule
\E6 & \ref{tab:E6Irreps} & \pageref{tab:E6Irreps} & \ref{tab:E6TensorProducts} & \pageref{tab:E6TensorProducts} & \ref{tab:E6BranchingRules} & \pageref{tab:E6BranchingRules} \\
\E7 & \ref{tab:E7Irreps} & \pageref{tab:E7Irreps} & \ref{tab:E7TensorProducts} & \pageref{tab:E7TensorProducts} & \ref{tab:E7BranchingRules} & \pageref{tab:E7BranchingRules} \\
\E8 & \ref{tab:E8Irreps} & \pageref{tab:E8Irreps} & \ref{tab:E8TensorProducts} & \pageref{tab:E8TensorProducts} & \ref{tab:E8BranchingRules} & \pageref{tab:E8BranchingRules} \\
\F4 & \ref{tab:F4Irreps} & \pageref{tab:F4Irreps} & \ref{tab:F4TensorProducts} & \pageref{tab:F4TensorProducts} & -- & -- \\
\G2 & \ref{tab:G2Irreps} & \pageref{tab:G2Irreps} & \ref{tab:G2TensorProducts} & \pageref{tab:G2TensorProducts} & -- & -- \\
\bottomrule
\end{tabular}
\end{center}
\caption{Table of tables}
\end{table}
\newpage
\subsection{Properties of Irreducible Representations}
\label{ssec:IrrepProperties}
{
% Local (group-scoped) redefinitions used only for the irrep-properties tables.
% Primes and subscript tags are typeset in a zero-width left-overlapping box
% (\makebox[0pt][l]) so that they do not disturb the column alignment of the
% right-aligned dimension labels.
% NOTE(review): \irrepbase, \irrepbarbase and \primes are defined elsewhere in
% this document -- these wrappers only change how the decorations are boxed.
\newcommand\starred[1]{#1\makebox[0pt][l]{${}^\ast$}}%
% #1 (optional, default 0): number of primes; #2: dimension label.
% The decoration box is kept inside the \ensuremath argument in all four
% macros below (the original \irrepbar/\irrepbarsub placed it outside, which
% worked only because the box content carries its own $...$ delimiters).
\renewcommand{\irrep}[2][0]{\ensuremath{\irrepbase{#2}\makebox[0pt][l]{${}^{\primes{#1}{\prime}}$}}}%
\renewcommand{\irrepbar}[2][0]{\ensuremath{\irrepbarbase{#2}\makebox[0pt][l]{${}^{\primes{#1}{\prime}}$}}}%
% #3: textual subscript tag attached to the (possibly primed) irrep label.
\renewcommand{\irrepsub}[3][0]{\ensuremath{\irrep[#1]{#2}\makebox[0pt][l]{${}_\text{#3}$}}}%
\renewcommand{\irrepbarsub}[3][0]{\ensuremath{\irrepbar[#1]{#2}\makebox[0pt][l]{${}_\text{#3}$}}}%
\input{IrrepPropertiesTablesFinal}
}
\newpage
\subsection{Tensor Products}
\label{ssec:TensorProducts}
% Taller rows and tighter column padding for the very wide tensor-product tables.
% NOTE(review): unlike the irrep-properties redefinitions above, these length
% changes are NOT scoped inside a group, so they remain in effect for the
% Branching Rules subsection below -- presumably intentional (those tables are
% equally wide), but confirm.
\setlength\extrarowheight{1.3pt}
\renewcommand{\tabcolsep}{2pt}
\input{TensorProductsTablesFinal}
\newpage
\subsection{Branching Rules}
\label{ssec:BranchingRules}
% Additional branching-rule longtables (SU(2)--SU(7), ...) follow inline below.
\input{BranchingRulesTablesFinal}
\subsubsection{\SU{N}}
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SU2BranchingRules}SU(2) Branching Rules}\\
\endfirsthead
\caption[]{SU(2) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SU(2)& $\to$ &U(1)\\
\midrule
\irrep{2} & = & $(1)+(-1)$\\
\irrep{3} & = & $(2)+(0)+(-2)$\\
\irrep{4} & = & $(3)+(1)+(-1)+(-3)$\\
\irrep{5} & = & $(4)+(2)+(0)+(-2)+(-4)$\\
\irrep{6} & = & $(5)+(3)+(1)+(-1)+(-3)+(-5)$\\
\irrep{7} & = & $(6)+(4)+(2)+(0)+(-2)+(-4)+(-6)$\\
\irrep{8} & = & $(7)+(5)+(3)+(1)+(-1)+(-3)+(-5)+(-7)$\\
\irrep{9} & = & $(8)+(6)+(4)+(2)+(0)+(-2)+(-4)+(-6)+(-8)$\\
\irrep{10} & = & $(9)+(7)+(5)+(3)+(1)+(-1)+(-3)+(-5)+(-7)+(-9)$\\
\irrep{11} & = & $(10)+(8)+(6)+(4)+(2)+(0)+(-2)+(-4)+(-6)+(-8)+(-10)$\\
\irrep{12} & = & $(11)+(9)+(7)+(5)+(3)+(1)+(-1)+(-3)+(-5)+(-7)+(-9)+(-11)$\\
\irrep{13} & = & $(12)+(10)+(8)+(6)+(4)+(2)+(0)+(-2)+(-4)+(-6)+(-8)+(-10)+(-12)$\\
\irrep{14} & = & $(13)+(11)+(9)+(7)+(5)+(3)+(1)+(-1)+(-3)+(-5)+(-7)+(-9)+(-11)+(-13)$\\
\irrep{15} & = & $(14)+(12)+(10)+(8)+(6)+(4)+(2)+(0)+(-2)+(-4)+(-6)+(-8)+(-10)+(-12)+(-14)$\\
\irrep{16} & = & $(15)+(13)+(11)+(9)+(7)+(5)+(3)+(1)+(-1)+(-3)+(-5)+(-7)+(-9)+(-11)+(-13)+(-15)$\\
\irrep{17} & = & $(16)+(14)+(12)+(10)+(8)+(6)+(4)+(2)+(0)+(-2)+(-4)+(-6)+(-8)+(-10)+(-12)+(-14)+(-16)$\\
\irrep{18} & = & $(17)+(15)+(13)+(11)+(9)+(7)+(5)+(3)+(1)+(-1)+(-3)+(-5)+(-7)+(-9)+(-11)+(-13)+(-15)+(-17)$\\
\bottomrule
\end{longtable}
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SU3BranchingRules}SU(3) Branching Rules}\\
\endfirsthead
\caption[]{SU(3) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SU(3)& $\to$ &SU(2)${\times}$U(1)\\
\midrule
\irrep{3} & = & $(\irrep{1})(-2)+(\irrep{2})(1)$\\
\irrep{6} & = & $(\irrep{1})(-4)+(\irrep{2})(-1)+(\irrep{3})(2)$\\
\irrep{8} & = & $(\irrep{1})(0)+(\irrep{2})(3)+(\irrep{2})(-3)+(\irrep{3})(0)$\\
\irrep{10} & = & $(\irrep{1})(-6)+(\irrep{2})(-3)+(\irrep{3})(0)+(\irrep{4})(3)$\\
\irrep{15} & = & $(\irrep{1})(-2)+(\irrep{2})(1)+(\irrep{2})(-5)+(\irrep{3})(4)+(\irrep{3})(-2)+(\irrep{4})(1)$\\
\irrep[1]{15} & = & $(\irrep{1})(-8)+(\irrep{2})(-5)+(\irrep{3})(-2)+(\irrep{4})(1)+(\irrep{5})(4)$\\
\irrep{21} & = & $(\irrep{1})(10)+(\irrep{2})(7)+(\irrep{3})(4)+(\irrep{4})(1)+(\irrep{5})(-2)+(\irrep{6})(-5)$\\
\irrep{24} & = & $(\irrep{1})(4)+(\irrep{2})(7)+(\irrep{2})(1)+(\irrep{3})(4)+(\irrep{3})(-2)+(\irrep{4})(1)+(\irrep{4})(-5)+(\irrep{5})(-2)$\\
\irrep{27} & = & $(\irrep{1})(0)+(\irrep{2})(3)+(\irrep{2})(-3)+(\irrep{3})(6)+(\irrep{3})(0)+(\irrep{3})(-6)+(\irrep{4})(3)+(\irrep{4})(-3)+(\irrep{5})(0)$\\
\irrep{28} & = & $(\irrep{1})(-12)+(\irrep{2})(-9)+(\irrep{3})(-6)+(\irrep{4})(-3)+(\irrep{5})(0)+(\irrep{6})(3)+(\irrep{7})(6)$\\
\irrep{35} & = & $(\irrep{1})(-6)+(\irrep{2})(-3)+(\irrep{2})(-9)+(\irrep{3})(0)+(\irrep{3})(-6)+(\irrep{4})(3)+(\irrep{4})(-3)+(\irrep{5})(6)+(\irrep{5})(0)+(\irrep{6})(3)$\\
\irrep{36} & = & $(\irrep{1})(-14)+(\irrep{2})(-11)+(\irrep{3})(-8)+(\irrep{4})(-5)+(\irrep{5})(-2)+(\irrep{6})(1)+(\irrep{7})(4)+(\irrep{8})(7)$\\
\irrep{42} & = & $(\irrep{1})(-2)+(\irrep{2})(1)+(\irrep{2})(-5)+(\irrep{3})(4)+(\irrep{3})(-2)+(\irrep{3})(-8)+(\irrep{4})(7)+(\irrep{4})(1)+(\irrep{4})(-5)+(\irrep{5})(4)+(\irrep{5})(-2)+(\irrep{6})(1)$\\
\irrep{45} & = & $(\irrep{1})(16)+(\irrep{2})(13)+(\irrep{3})(10)+(\irrep{4})(7)+(\irrep{5})(4)+(\irrep{6})(1)+(\irrep{7})(-2)+(\irrep{8})(-5)+(\irrep{9})(-8)$\\
\irrep{48} & = & $(\irrep{1})(-8)+(\irrep{2})(-5)+(\irrep{2})(-11)+(\irrep{3})(-2)+(\irrep{3})(-8)+(\irrep{4})(1)+(\irrep{4})(-5)+(\irrep{5})(4)+(\irrep{5})(-2)+(\irrep{6})(7)+(\irrep{6})(1)+(\irrep{7})(4)$\\
\irrep{55} & = & $(\irrep{1})(-18)+(\irrep{2})(-15)+(\irrep{3})(-12)+(\irrep{4})(-9)+(\irrep{5})(-6)+(\irrep{6})(-3)+(\irrep{7})(0)+(\irrep{8})(3)+(\irrep{9})(6)+(\irrep{10})(9)$\\
\bottomrule
\end{longtable}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SU4BranchingRules}SU(4) Branching Rules}\\
\endfirsthead
\caption[]{SU(4) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SU(4)& $\to$ &SU(3)${\times}$U(1)\\
\midrule
\irrep{4} & = & $(\irrep{1})(-3)+(\irrep{3})(1)$\\
\irrep{6} & = & $(\irrep{3})(-2)+(\irrepbar{3})(2)$\\
\irrep{10} & = & $(\irrep{1})(-6)+(\irrep{3})(-2)+(\irrep{6})(2)$\\
\irrep{15} & = & $(\irrep{1})(0)+(\irrep{3})(4)+(\irrepbar{3})(-4)+(\irrep{8})(0)$\\
\irrep{20} & = & $(\irrep{3})(1)+(\irrepbar{3})(5)+(\irrepbar{6})(1)+(\irrep{8})(-3)$\\
\irrep[1]{20} & = & $(\irrepbar{6})(4)+(\irrep{6})(-4)+(\irrep{8})(0)$\\
\irrep[2]{20} & = & $(\irrep{1})(9)+(\irrepbar{3})(5)+(\irrepbar{6})(1)+(\irrepbar{10})(-3)$\\
\irrep{35} & = & $(\irrep{1})(-12)+(\irrep{3})(-8)+(\irrep{6})(-4)+(\irrep{10})(0)+(\irrep[1]{15})(4)$\\
\irrep{36} & = & $(\irrep{1})(-3)+(\irrep{3})(1)+(\irrepbar{3})(-7)+(\irrep{6})(5)+(\irrep{8})(-3)+(\irrep{15})(1)$\\
\irrep{45} & = & $(\irrep{3})(-8)+(\irrepbar{3})(-4)+(\irrep{6})(-4)+(\irrep{8})(0)+(\irrep{10})(0)+(\irrep{15})(4)$\\
\irrep{50} & = & $(\irrep{10})(-6)+(\irrepbar{10})(6)+(\irrep{15})(-2)+(\irrepbar{15})(2)$\\
\irrep{56} & = & $(\irrep{1})(-15)+(\irrep{3})(-11)+(\irrep{6})(-7)+(\irrep{10})(-3)+(\irrep[1]{15})(1)+(\irrepbar{21})(5)$\\
\irrep{60} & = & $(\irrepbar{6})(1)+(\irrep{6})(-7)+(\irrep{8})(-3)+(\irrep{10})(-3)+(\irrep{15})(1)+(\irrepbar{15})(5)$\\
\irrep{64} & = & $(\irrep{3})(-2)+(\irrepbar{3})(2)+(\irrepbar{6})(-2)+(\irrep{6})(2)+(\irrep{8})(6)+(\irrep{8})(-6)+(\irrep{15})(-2)+(\irrepbar{15})(2)$\\
\irrep{70} & = & $(\irrep{1})(-6)+(\irrep{3})(-2)+(\irrepbar{3})(-10)+(\irrep{6})(2)+(\irrep{8})(-6)+(\irrep{10})(6)+(\irrep{15})(-2)+(\irrepbar{24})(2)$\\
\irrep{84} & = & $(\irrep{1})(0)+(\irrep{3})(4)+(\irrepbar{3})(-4)+(\irrepbar{6})(-8)+(\irrep{6})(8)+(\irrep{8})(0)+(\irrep{15})(4)+(\irrepbar{15})(-4)+(\irrep{27})(0)$\\
\irrep[1]{84} & = & $(\irrep{3})(-11)+(\irrepbar{3})(-7)+(\irrep{6})(-7)+(\irrep{8})(-3)+(\irrep{10})(-3)+(\irrep{15})(1)+(\irrep[1]{15})(1)+(\irrepbar{24})(5)$\\
\irrep[2]{84} & = & $(\irrep{1})(-18)+(\irrep{3})(-14)+(\irrep{6})(-10)+(\irrep{10})(-6)+(\irrep[1]{15})(-2)+(\irrepbar{21})(2)+(\irrep{28})(6)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(4)& $\to$ &SU(2)${\times}$SU(2)${\times}$U(1)\\
\midrule
\irrep{4} & = & $(\irrep{2},\irrep{1})(1)+(\irrep{1},\irrep{2})(-1)$\\
\irrep{6} & = & $(\irrep{1},\irrep{1})(2)+(\irrep{1},\irrep{1})(-2)+(\irrep{2},\irrep{2})(0)$\\
\irrep{10} & = & $(\irrep{2},\irrep{2})(0)+(\irrep{3},\irrep{1})(2)+(\irrep{1},\irrep{3})(-2)$\\
\irrep{15} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{2},\irrep{2})(2)+(\irrep{2},\irrep{2})(-2)+(\irrep{3},\irrep{1})(0)+(\irrep{1},\irrep{3})(0)$\\
\irrep{20} & = & $(\irrep{2},\irrep{1})(1)+(\irrep{2},\irrep{1})(-3)+(\irrep{1},\irrep{2})(3)+(\irrep{1},\irrep{2})(-1)+(\irrep{3},\irrep{2})(-1)+(\irrep{2},\irrep{3})(1)$\\
\irrep[1]{20} & = & $(\irrep{1},\irrep{1})(4)+(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{1})(-4)+(\irrep{2},\irrep{2})(2)+(\irrep{2},\irrep{2})(-2)+(\irrep{3},\irrep{3})(0)$\\
\irrep[2]{20} & = & $(\irrep{3},\irrep{2})(-1)+(\irrep{2},\irrep{3})(1)+(\irrep{4},\irrep{1})(-3)+(\irrep{1},\irrep{4})(3)$\\
\irrep{35} & = & $(\irrep{3},\irrep{3})(0)+(\irrep{4},\irrep{2})(2)+(\irrep{2},\irrep{4})(-2)+(\irrep{5},\irrep{1})(4)+(\irrep{1},\irrep{5})(-4)$\\
\irrep{36} & = & $(\irrep{2},\irrep{1})(1)+(\irrep{1},\irrep{2})(-1)+(\irrep{3},\irrep{2})(3)+(\irrep{3},\irrep{2})(-1)+(\irrep{2},\irrep{3})(1)+(\irrep{2},\irrep{3})(-3)+(\irrep{4},\irrep{1})(1)+(\irrep{1},\irrep{4})(-1)$\\
\irrep{45} & = & $(\irrep{2},\irrep{2})(2)+(\irrep{2},\irrep{2})(-2)+(\irrep{3},\irrep{1})(4)+(\irrep{3},\irrep{1})(0)+(\irrep{1},\irrep{3})(0)+(\irrep{1},\irrep{3})(-4)+(\irrep{3},\irrep{3})(0)+(\irrep{4},\irrep{2})(2)+(\irrep{2},\irrep{4})(-2)$\\
\irrep{50} & = & $(\irrep{1},\irrep{1})(6)+(\irrep{1},\irrep{1})(2)+(\irrep{1},\irrep{1})(-2)+(\irrep{1},\irrep{1})(-6)+(\irrep{2},\irrep{2})(4)+(\irrep{2},\irrep{2})(0)+(\irrep{2},\irrep{2})(-4)+(\irrep{3},\irrep{3})(2)+(\irrep{3},\irrep{3})(-2)+(\irrep{4},\irrep{4})(0)$\\
\irrep{56} & = & $(\irrep{4},\irrep{3})(1)+(\irrep{3},\irrep{4})(-1)+(\irrep{5},\irrep{2})(3)+(\irrep{2},\irrep{5})(-3)+(\irrep{6},\irrep{1})(5)+(\irrep{1},\irrep{6})(-5)$\\
\irrep{60} & = & $(\irrep{2},\irrep{1})(5)+(\irrep{2},\irrep{1})(1)+(\irrep{2},\irrep{1})(-3)+(\irrep{1},\irrep{2})(3)+(\irrep{1},\irrep{2})(-1)+(\irrep{1},\irrep{2})(-5)+(\irrep{3},\irrep{2})(3)+(\irrep{3},\irrep{2})(-1)+(\irrep{2},\irrep{3})(1)+(\irrep{2},\irrep{3})(-3)+(\irrep{4},\irrep{3})(1)+(\irrep{3},\irrep{4})(-1)$\\
\irrep{64} & = & $(\irrep{1},\irrep{1})(2)+(\irrep{1},\irrep{1})(-2)+(\irrep{2},\irrep{2})(4)+2(\irrep{2},\irrep{2})(0)+(\irrep{2},\irrep{2})(-4)+(\irrep{3},\irrep{1})(2)+(\irrep{3},\irrep{1})(-2)+(\irrep{1},\irrep{3})(2)+(\irrep{1},\irrep{3})(-2)+(\irrep{3},\irrep{3})(2)+(\irrep{3},\irrep{3})(-2)+(\irrep{4},\irrep{2})(0)+(\irrep{2},\irrep{4})(0)$\\
\irrep{70} & = & $(\irrep{2},\irrep{2})(0)+(\irrep{3},\irrep{1})(2)+(\irrep{1},\irrep{3})(-2)+(\irrep{3},\irrep{3})(2)+(\irrep{3},\irrep{3})(-2)+(\irrep{4},\irrep{2})(4)+(\irrep{4},\irrep{2})(0)+(\irrep{2},\irrep{4})(0)+(\irrep{2},\irrep{4})(-4)+(\irrep{5},\irrep{1})(2)+(\irrep{1},\irrep{5})(-2)$\\
\irrep{84} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{2},\irrep{2})(2)+(\irrep{2},\irrep{2})(-2)+(\irrep{3},\irrep{1})(0)+(\irrep{1},\irrep{3})(0)+(\irrep{3},\irrep{3})(4)+(\irrep{3},\irrep{3})(0)+(\irrep{3},\irrep{3})(-4)+(\irrep{4},\irrep{2})(2)+(\irrep{4},\irrep{2})(-2)+(\irrep{2},\irrep{4})(2)+(\irrep{2},\irrep{4})(-2)+(\irrep{5},\irrep{1})(0)+(\irrep{1},\irrep{5})(0)$\\
\irrep[1]{84} & = & $(\irrep{3},\irrep{2})(3)+(\irrep{3},\irrep{2})(-1)+(\irrep{2},\irrep{3})(1)+(\irrep{2},\irrep{3})(-3)+(\irrep{4},\irrep{1})(5)+(\irrep{4},\irrep{1})(1)+(\irrep{1},\irrep{4})(-1)+(\irrep{1},\irrep{4})(-5)+(\irrep{4},\irrep{3})(1)+(\irrep{3},\irrep{4})(-1)+(\irrep{5},\irrep{2})(3)+(\irrep{2},\irrep{5})(-3)$\\
\irrep[2]{84} & = & $(\irrep{4},\irrep{4})(0)+(\irrep{5},\irrep{3})(2)+(\irrep{3},\irrep{5})(-2)+(\irrep{6},\irrep{2})(4)+(\irrep{2},\irrep{6})(-4)+(\irrep{7},\irrep{1})(6)+(\irrep{1},\irrep{7})(-6)$\\
\bottomrule
\end{longtable}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SU5BranchingRules}SU(5) Branching Rules}\\
\endfirsthead
\caption[]{SU(5) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SU(5)& $\to$ &SU(4)${\times}$U(1)\\
\midrule
\irrep{5} & = & $(\irrep{1})(-4)+(\irrep{4})(1)$\\
\irrep{10} & = & $(\irrep{4})(-3)+(\irrep{6})(2)$\\
\irrep{15} & = & $(\irrep{1})(-8)+(\irrep{4})(-3)+(\irrep{10})(2)$\\
\irrep{24} & = & $(\irrep{1})(0)+(\irrep{4})(5)+(\irrepbar{4})(-5)+(\irrep{15})(0)$\\
\irrep{35} & = & $(\irrep{1})(12)+(\irrepbar{4})(7)+(\irrepbar{10})(2)+(\irrep[2]{20})(-3)$\\
\irrep{40} & = & $(\irrepbar{4})(7)+(\irrep{6})(2)+(\irrepbar{10})(2)+(\irrep{20})(-3)$\\
\irrep{45} & = & $(\irrep{4})(1)+(\irrep{6})(6)+(\irrep{15})(-4)+(\irrep{20})(1)$\\
\irrep{50} & = & $(\irrepbar{10})(6)+(\irrep{20})(1)+(\irrep[1]{20})(-4)$\\
\irrep{70} & = & $(\irrep{1})(-4)+(\irrep{4})(1)+(\irrepbar{4})(-9)+(\irrep{10})(6)+(\irrep{15})(-4)+(\irrep{36})(1)$\\
\irrep[1]{70} & = & $(\irrep{1})(16)+(\irrepbar{4})(11)+(\irrepbar{10})(6)+(\irrep[2]{20})(1)+(\irrepbar{35})(-4)$\\
\irrep{75} & = & $(\irrep{15})(0)+(\irrep{20})(5)+(\irrepbar{20})(-5)+(\irrep[1]{20})(0)$\\
\irrep{105} & = & $(\irrepbar{4})(11)+(\irrep{6})(6)+(\irrepbar{10})(6)+(\irrep{20})(1)+(\irrep[2]{20})(1)+(\irrepbar{45})(-4)$\\
\irrep{126} & = & $(\irrepbar{4})(-5)+(\irrep{6})(-10)+(\irrep{15})(0)+(\irrepbar{20})(-5)+(\irrep{36})(5)+(\irrep{45})(0)$\\
\irrep[1]{126} & = & $(\irrep{1})(-20)+(\irrep{4})(-15)+(\irrep{10})(-10)+(\irrepbar[2]{20})(-5)+(\irrep{35})(0)+(\irrep{56})(5)$\\
\irrep{160} & = & $(\irrep{1})(-8)+(\irrep{4})(-3)+(\irrepbar{4})(-13)+(\irrep{10})(2)+(\irrep{15})(-8)+(\irrepbar[2]{20})(7)+(\irrep{36})(-3)+(\irrep{70})(2)$\\
\irrep{175} & = & $(\irrep{4})(-3)+(\irrep{6})(2)+(\irrep{10})(2)+(\irrep{15})(-8)+(\irrep{20})(-3)+(\irrepbar{20})(7)+(\irrep{36})(-3)+(\irrep{64})(2)$\\
\irrep[1]{175} & = & $(\irrep{10})(-10)+(\irrepbar{20})(-5)+(\irrep[1]{20})(0)+(\irrepbar[2]{20})(-5)+(\irrep{45})(0)+(\irrep{60})(5)$\\
\irrep[2]{175} & = & $(\irrepbar[2]{20})(-9)+(\irrep{45})(-4)+(\irrep{50})(6)+(\irrep{60})(1)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(5)& $\to$ &SU(3)${\times}$SU(2)${\times}$U(1)\\
\midrule
\irrep{5} & = & $(\irrep{1},\irrep{2})(-3)+(\irrep{3},\irrep{1})(2)$\\
\irrep{10} & = & $(\irrep{1},\irrep{1})(-6)+(\irrepbar{3},\irrep{1})(4)+(\irrep{3},\irrep{2})(-1)$\\
\irrep{15} & = & $(\irrep{1},\irrep{3})(-6)+(\irrep{3},\irrep{2})(-1)+(\irrep{6},\irrep{1})(4)$\\
\irrep{24} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{3})(0)+(\irrep{3},\irrep{2})(5)+(\irrepbar{3},\irrep{2})(-5)+(\irrep{8},\irrep{1})(0)$\\
\irrep{35} & = & $(\irrep{1},\irrep{4})(9)+(\irrepbar{3},\irrep{3})(4)+(\irrepbar{6},\irrep{2})(-1)+(\irrepbar{10},\irrep{1})(-6)$\\
\irrep{40} & = & $(\irrep{1},\irrep{2})(9)+(\irrepbar{3},\irrep{1})(4)+(\irrep{3},\irrep{2})(-1)+(\irrepbar{3},\irrep{3})(4)+(\irrepbar{6},\irrep{2})(-1)+(\irrep{8},\irrep{1})(-6)$\\
\irrep{45} & = & $(\irrep{1},\irrep{2})(-3)+(\irrep{3},\irrep{1})(2)+(\irrepbar{3},\irrep{1})(-8)+(\irrepbar{3},\irrep{2})(7)+(\irrep{3},\irrep{3})(2)+(\irrepbar{6},\irrep{1})(2)+(\irrep{8},\irrep{2})(-3)$\\
\irrep{50} & = & $(\irrep{1},\irrep{1})(12)+(\irrep{3},\irrep{1})(2)+(\irrepbar{3},\irrep{2})(7)+(\irrep{6},\irrep{1})(-8)+(\irrepbar{6},\irrep{3})(2)+(\irrep{8},\irrep{2})(-3)$\\
\irrep{70} & = & $(\irrep{1},\irrep{2})(-3)+(\irrep{3},\irrep{1})(2)+(\irrep{1},\irrep{4})(-3)+(\irrep{3},\irrep{3})(2)+(\irrepbar{3},\irrep{3})(-8)+(\irrep{6},\irrep{2})(7)+(\irrep{8},\irrep{2})(-3)+(\irrep{15},\irrep{1})(2)$\\
\irrep[1]{70} & = & $(\irrep{1},\irrep{5})(12)+(\irrepbar{3},\irrep{4})(7)+(\irrepbar{6},\irrep{3})(2)+(\irrepbar{10},\irrep{2})(-3)+(\irrepbar[1]{15},\irrep{1})(-8)$\\
\irrep{75} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{3},\irrep{1})(-10)+(\irrepbar{3},\irrep{1})(10)+(\irrep{3},\irrep{2})(5)+(\irrepbar{3},\irrep{2})(-5)+(\irrepbar{6},\irrep{2})(5)+(\irrep{6},\irrep{2})(-5)+(\irrep{8},\irrep{1})(0)+(\irrep{8},\irrep{3})(0)$\\
\irrep{105} & = & $(\irrep{1},\irrep{3})(12)+(\irrepbar{3},\irrep{2})(7)+(\irrep{3},\irrep{3})(2)+(\irrepbar{6},\irrep{1})(2)+(\irrepbar{3},\irrep{4})(7)+(\irrepbar{6},\irrep{3})(2)+(\irrep{8},\irrep{2})(-3)+(\irrepbar{10},\irrep{2})(-3)+(\irrepbar{15},\irrep{1})(-8)$\\
\irrep{126} & = & $(\irrep{1},\irrep{3})(0)+(\irrep{3},\irrep{2})(5)+(\irrepbar{3},\irrep{2})(-5)+(\irrep{3},\irrep{3})(-10)+(\irrep{6},\irrep{1})(10)+(\irrepbar{3},\irrep{4})(-5)+(\irrep{6},\irrep{2})(-5)+(\irrep{8},\irrep{1})(0)+(\irrep{8},\irrep{3})(0)+(\irrep{10},\irrep{1})(0)+(\irrep{15},\irrep{2})(5)$\\
\irrep[1]{126} & = & $(\irrep{1},\irrep{6})(-15)+(\irrep{3},\irrep{5})(-10)+(\irrep{6},\irrep{4})(-5)+(\irrep{10},\irrep{3})(0)+(\irrep[1]{15},\irrep{2})(5)+(\irrepbar{21},\irrep{1})(10)$\\
\irrep{160} & = & $(\irrep{1},\irrep{3})(-6)+(\irrep{3},\irrep{2})(-1)+(\irrep{1},\irrep{5})(-6)+(\irrep{6},\irrep{1})(4)+(\irrep{3},\irrep{4})(-1)+(\irrepbar{3},\irrep{4})(-11)+(\irrep{6},\irrep{3})(4)+(\irrep{8},\irrep{3})(-6)+(\irrep{10},\irrep{2})(9)+(\irrep{15},\irrep{2})(-1)+(\irrepbar{24},\irrep{1})(4)$\\
\irrep{175} & = & $(\irrep{1},\irrep{1})(-6)+(\irrepbar{3},\irrep{1})(4)+(\irrep{1},\irrep{3})(-6)+2(\irrep{3},\irrep{2})(-1)+(\irrepbar{3},\irrep{2})(-11)+(\irrepbar{3},\irrep{3})(4)+(\irrep{6},\irrep{1})(4)+(\irrep{3},\irrep{4})(-1)+(\irrepbar{6},\irrep{2})(-1)+(\irrep{8},\irrep{1})(-6)+(\irrep{6},\irrep{3})(4)+(\irrep{8},\irrep{2})(9)+(\irrep{8},\irrep{3})(-6)+(\irrepbar{15},\irrep{1})(4)+(\irrep{15},\irrep{2})(-1)$\\
\irrep[1]{175} & = & $(\irrep{1},\irrep{2})(-15)+(\irrep{3},\irrep{1})(-10)+(\irrepbar{3},\irrep{2})(-5)+(\irrep{3},\irrep{3})(-10)+(\irrepbar{6},\irrep{2})(5)+(\irrep{6},\irrep{2})(-5)+(\irrep{8},\irrep{1})(0)+(\irrep{6},\irrep{4})(-5)+(\irrep{8},\irrep{3})(0)+(\irrep{10},\irrep{3})(0)+(\irrepbar{15},\irrep{1})(10)+(\irrep{15},\irrep{2})(5)$\\
\irrep[2]{175} & = & $(\irrep{1},\irrep{1})(-18)+(\irrepbar{3},\irrep{1})(-8)+(\irrep{3},\irrep{2})(-13)+(\irrepbar{6},\irrep{1})(2)+(\irrep{6},\irrep{3})(-8)+(\irrep{8},\irrep{2})(-3)+(\irrepbar{10},\irrep{1})(12)+(\irrep{10},\irrep{4})(-3)+(\irrepbar{15},\irrep{2})(7)+(\irrep{15},\irrep{3})(2)$\\
\bottomrule
\end{longtable}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SU6BranchingRules}SU(6) Branching Rules}\\
\endfirsthead
\caption[]{SU(6) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SU(6)& $\to$ &SU(5)${\times}$U(1)\\
\midrule
\irrep{6} & = & $(\irrep{1})(-5)+(\irrep{5})(1)$\\
\irrep{15} & = & $(\irrep{5})(-4)+(\irrep{10})(2)$\\
\irrep{20} & = & $(\irrep{10})(-3)+(\irrepbar{10})(3)$\\
\irrep{21} & = & $(\irrep{1})(-10)+(\irrep{5})(-4)+(\irrep{15})(2)$\\
\irrep{35} & = & $(\irrep{1})(0)+(\irrep{5})(6)+(\irrepbar{5})(-6)+(\irrep{24})(0)$\\
\irrep{56} & = & $(\irrep{1})(-15)+(\irrep{5})(-9)+(\irrep{15})(-3)+(\irrepbar{35})(3)$\\
\irrep{70} & = & $(\irrep{5})(-9)+(\irrep{10})(-3)+(\irrep{15})(-3)+(\irrepbar{40})(3)$\\
\irrep{84} & = & $(\irrep{5})(1)+(\irrep{10})(7)+(\irrep{24})(-5)+(\irrep{45})(1)$\\
\irrep{105} & = & $(\irrep{10})(2)+(\irrepbar{10})(8)+(\irrep{40})(2)+(\irrep{45})(-4)$\\
\irrep[1]{105} & = & $(\irrepbar{15})(8)+(\irrep{40})(2)+(\irrep{50})(-4)$\\
\irrep{120} & = & $(\irrep{1})(-5)+(\irrep{5})(1)+(\irrepbar{5})(-11)+(\irrep{15})(7)+(\irrep{24})(-5)+(\irrep{70})(1)$\\
\irrep{126} & = & $(\irrep{1})(20)+(\irrepbar{5})(14)+(\irrepbar{15})(8)+(\irrep{35})(2)+(\irrep[1]{70})(-4)$\\
\irrep{175} & = & $(\irrep{50})(6)+(\irrepbar{50})(-6)+(\irrep{75})(0)$\\
\irrep{189} & = & $(\irrep{24})(0)+(\irrep{45})(6)+(\irrepbar{45})(-6)+(\irrep{75})(0)$\\
\irrep{210} & = & $(\irrep{40})(7)+(\irrep{45})(1)+(\irrep{50})(1)+(\irrep{75})(-5)$\\
\irrep[1]{210} & = & $(\irrepbar{5})(14)+(\irrepbar{10})(8)+(\irrepbar{15})(8)+(\irrep{35})(2)+(\irrep{40})(2)+(\irrep{105})(-4)$\\
\irrep{252} & = & $(\irrep{1})(25)+(\irrepbar{5})(19)+(\irrepbar{15})(13)+(\irrep{35})(7)+(\irrep[1]{70})(1)+(\irrepbar[1]{126})(-5)$\\
\irrep{280} & = & $(\irrepbar{5})(-6)+(\irrepbar{10})(-12)+(\irrep{24})(0)+(\irrepbar{45})(-6)+(\irrep{70})(6)+(\irrep{126})(0)$\\
\irrep{315} & = & $(\irrep{1})(-10)+(\irrep{5})(-4)+(\irrepbar{5})(-16)+(\irrep{15})(2)+(\irrep{24})(-10)+(\irrepbar{35})(8)+(\irrep{70})(-4)+(\irrep{160})(2)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(6)& $\to$ &SU(4)${\times}$SU(2)${\times}$U(1)\\
\midrule
\irrep{6} & = & $(\irrep{1},\irrep{2})(-2)+(\irrep{4},\irrep{1})(1)$\\
\irrep{15} & = & $(\irrep{1},\irrep{1})(-4)+(\irrep{4},\irrep{2})(-1)+(\irrep{6},\irrep{1})(2)$\\
\irrep{20} & = & $(\irrep{4},\irrep{1})(-3)+(\irrepbar{4},\irrep{1})(3)+(\irrep{6},\irrep{2})(0)$\\
\irrep{21} & = & $(\irrep{1},\irrep{3})(-4)+(\irrep{4},\irrep{2})(-1)+(\irrep{10},\irrep{1})(2)$\\
\irrep{35} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{3})(0)+(\irrep{4},\irrep{2})(3)+(\irrepbar{4},\irrep{2})(-3)+(\irrep{15},\irrep{1})(0)$\\
\irrep{56} & = & $(\irrep{1},\irrep{4})(-6)+(\irrep{4},\irrep{3})(-3)+(\irrep{10},\irrep{2})(0)+(\irrepbar[2]{20},\irrep{1})(3)$\\
\irrep{70} & = & $(\irrep{1},\irrep{2})(-6)+(\irrep{4},\irrep{1})(-3)+(\irrep{4},\irrep{3})(-3)+(\irrep{6},\irrep{2})(0)+(\irrep{10},\irrep{2})(0)+(\irrepbar{20},\irrep{1})(3)$\\
\irrep{84} & = & $(\irrep{1},\irrep{2})(-2)+(\irrep{4},\irrep{1})(1)+(\irrepbar{4},\irrep{1})(-5)+(\irrep{4},\irrep{3})(1)+(\irrep{6},\irrep{2})(4)+(\irrep{15},\irrep{2})(-2)+(\irrep{20},\irrep{1})(1)$\\
\irrep{105} & = & $(\irrep{4},\irrep{2})(-1)+(\irrepbar{4},\irrep{2})(5)+(\irrep{6},\irrep{1})(2)+(\irrep{6},\irrep{3})(2)+(\irrepbar{10},\irrep{1})(2)+(\irrep{15},\irrep{1})(-4)+(\irrep{20},\irrep{2})(-1)$\\
\irrep[1]{105} & = & $(\irrep{1},\irrep{1})(8)+(\irrepbar{4},\irrep{2})(5)+(\irrep{6},\irrep{1})(2)+(\irrepbar{10},\irrep{3})(2)+(\irrep[1]{20},\irrep{1})(-4)+(\irrep{20},\irrep{2})(-1)$\\
\irrep{120} & = & $(\irrep{1},\irrep{2})(-2)+(\irrep{4},\irrep{1})(1)+(\irrep{1},\irrep{4})(-2)+(\irrep{4},\irrep{3})(1)+(\irrepbar{4},\irrep{3})(-5)+(\irrep{10},\irrep{2})(4)+(\irrep{15},\irrep{2})(-2)+(\irrep{36},\irrep{1})(1)$\\
\irrep{126} & = & $(\irrep{1},\irrep{5})(8)+(\irrepbar{4},\irrep{4})(5)+(\irrepbar{10},\irrep{3})(2)+(\irrep[2]{20},\irrep{2})(-1)+(\irrepbar{35},\irrep{1})(-4)$\\
\irrep{175} & = & $(\irrep{10},\irrep{1})(-6)+(\irrepbar{10},\irrep{1})(6)+(\irrep{15},\irrep{1})(0)+(\irrep{20},\irrep{2})(3)+(\irrepbar{20},\irrep{2})(-3)+(\irrep[1]{20},\irrep{3})(0)$\\
\irrep{189} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{4},\irrep{2})(3)+(\irrepbar{4},\irrep{2})(-3)+(\irrep{6},\irrep{1})(6)+(\irrep{6},\irrep{1})(-6)+(\irrep{15},\irrep{1})(0)+(\irrep{15},\irrep{3})(0)+(\irrep[1]{20},\irrep{1})(0)+(\irrep{20},\irrep{2})(3)+(\irrepbar{20},\irrep{2})(-3)$\\
\irrep{210} & = & $(\irrep{4},\irrep{1})(1)+(\irrepbar{4},\irrep{1})(7)+(\irrep{6},\irrep{2})(4)+(\irrepbar{10},\irrep{2})(4)+(\irrep{15},\irrep{2})(-2)+(\irrep{20},\irrep{1})(1)+(\irrepbar{20},\irrep{1})(-5)+(\irrep[1]{20},\irrep{2})(-2)+(\irrep{20},\irrep{3})(1)$\\
\irrep[1]{210} & = & $(\irrep{1},\irrep{3})(8)+(\irrepbar{4},\irrep{2})(5)+(\irrepbar{4},\irrep{4})(5)+(\irrep{6},\irrep{3})(2)+(\irrepbar{10},\irrep{1})(2)+(\irrepbar{10},\irrep{3})(2)+(\irrep{20},\irrep{2})(-1)+(\irrep[2]{20},\irrep{2})(-1)+(\irrepbar{45},\irrep{1})(-4)$\\
\irrep{252} & = & $(\irrep{1},\irrep{6})(10)+(\irrepbar{4},\irrep{5})(7)+(\irrepbar{10},\irrep{4})(4)+(\irrep[2]{20},\irrep{3})(1)+(\irrepbar{35},\irrep{2})(-2)+(\irrepbar{56},\irrep{1})(-5)$\\
\irrep{280} & = & $(\irrep{1},\irrep{3})(0)+(\irrep{4},\irrep{2})(3)+(\irrepbar{4},\irrep{2})(-3)+(\irrepbar{4},\irrep{4})(-3)+(\irrep{6},\irrep{3})(-6)+(\irrep{10},\irrep{1})(6)+(\irrep{15},\irrep{1})(0)+(\irrep{15},\irrep{3})(0)+(\irrepbar{20},\irrep{2})(-3)+(\irrep{36},\irrep{2})(3)+(\irrep{45},\irrep{1})(0)$\\
\irrep{315} & = & $(\irrep{1},\irrep{3})(-4)+(\irrep{4},\irrep{2})(-1)+(\irrep{1},\irrep{5})(-4)+(\irrep{4},\irrep{4})(-1)+(\irrepbar{4},\irrep{4})(-7)+(\irrep{10},\irrep{1})(2)+(\irrep{10},\irrep{3})(2)+(\irrep{15},\irrep{3})(-4)+(\irrepbar[2]{20},\irrep{2})(5)+(\irrep{36},\irrep{2})(-1)+(\irrep{70},\irrep{1})(2)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(6)& $\to$ &SU(3)${\times}$SU(3)${\times}$U(1)\\
\midrule
\irrep{6} & = & $(\irrep{3},\irrep{1})(1)+(\irrep{1},\irrep{3})(-1)$\\
\irrep{15} & = & $(\irrepbar{3},\irrep{1})(2)+(\irrep{1},\irrepbar{3})(-2)+(\irrep{3},\irrep{3})(0)$\\
\irrep{20} & = & $(\irrep{1},\irrep{1})(3)+(\irrep{1},\irrep{1})(-3)+(\irrep{3},\irrepbar{3})(-1)+(\irrepbar{3},\irrep{3})(1)$\\
\irrep{21} & = & $(\irrep{3},\irrep{3})(0)+(\irrep{6},\irrep{1})(2)+(\irrep{1},\irrep{6})(-2)$\\
\irrep{35} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{3},\irrepbar{3})(2)+(\irrepbar{3},\irrep{3})(-2)+(\irrep{8},\irrep{1})(0)+(\irrep{1},\irrep{8})(0)$\\
\irrep{56} & = & $(\irrep{6},\irrep{3})(1)+(\irrep{3},\irrep{6})(-1)+(\irrep{10},\irrep{1})(3)+(\irrep{1},\irrep{10})(-3)$\\
\irrep{70} & = & $(\irrep{3},\irrepbar{3})(-1)+(\irrepbar{3},\irrep{3})(1)+(\irrep{8},\irrep{1})(3)+(\irrep{1},\irrep{8})(-3)+(\irrep{6},\irrep{3})(1)+(\irrep{3},\irrep{6})(-1)$\\
\irrep{84} & = & $(\irrep{3},\irrep{1})(1)+(\irrep{1},\irrep{3})(-1)+(\irrepbar{3},\irrepbar{3})(3)+(\irrepbar{3},\irrepbar{3})(-3)+(\irrepbar{6},\irrep{1})(1)+(\irrep{1},\irrepbar{6})(-1)+(\irrep{8},\irrep{3})(-1)+(\irrep{3},\irrep{8})(1)$\\
\irrep{105} & = & $(\irrepbar{3},\irrep{1})(2)+(\irrepbar{3},\irrep{1})(-4)+(\irrep{1},\irrepbar{3})(4)+(\irrep{1},\irrepbar{3})(-2)+(\irrep{3},\irrep{3})(0)+(\irrep{3},\irrepbar{6})(0)+(\irrepbar{6},\irrep{3})(0)+(\irrep{8},\irrepbar{3})(-2)+(\irrepbar{3},\irrep{8})(2)$\\
\irrep[1]{105} & = & $(\irrep{3},\irrep{3})(0)+(\irrep{6},\irrep{1})(-4)+(\irrep{1},\irrep{6})(4)+(\irrep{8},\irrepbar{3})(-2)+(\irrepbar{3},\irrep{8})(2)+(\irrepbar{6},\irrepbar{6})(0)$\\
\irrep{120} & = & $(\irrep{3},\irrep{1})(1)+(\irrep{1},\irrep{3})(-1)+(\irrep{6},\irrepbar{3})(3)+(\irrepbar{3},\irrep{6})(-3)+(\irrep{8},\irrep{3})(-1)+(\irrep{3},\irrep{8})(1)+(\irrep{15},\irrep{1})(1)+(\irrep{1},\irrep{15})(-1)$\\
\irrep{126} & = & $(\irrepbar{6},\irrepbar{6})(0)+(\irrepbar{10},\irrepbar{3})(-2)+(\irrepbar{3},\irrepbar{10})(2)+(\irrepbar[1]{15},\irrep{1})(-4)+(\irrep{1},\irrepbar[1]{15})(4)$\\
\irrep{175} & = & $(\irrep{1},\irrep{1})(6)+(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{1})(-6)+(\irrep{3},\irrepbar{3})(2)+(\irrep{3},\irrepbar{3})(-4)+(\irrepbar{3},\irrep{3})(4)+(\irrepbar{3},\irrep{3})(-2)+(\irrep{6},\irrepbar{6})(-2)+(\irrepbar{6},\irrep{6})(2)+(\irrep{8},\irrep{8})(0)$\\
\irrep{189} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{3},\irrepbar{3})(2)+(\irrep{3},\irrepbar{3})(-4)+(\irrepbar{3},\irrep{3})(4)+(\irrepbar{3},\irrep{3})(-2)+(\irrep{8},\irrep{1})(0)+(\irrep{1},\irrep{8})(0)+(\irrep{6},\irrep{3})(-2)+(\irrep{3},\irrep{6})(2)+(\irrepbar{6},\irrepbar{3})(2)+(\irrepbar{3},\irrepbar{6})(-2)+(\irrep{8},\irrep{8})(0)$\\
\irrep{210} & = & $(\irrep{3},\irrep{1})(1)+(\irrep{3},\irrep{1})(-5)+(\irrep{1},\irrep{3})(5)+(\irrep{1},\irrep{3})(-1)+(\irrepbar{3},\irrepbar{3})(3)+(\irrepbar{3},\irrepbar{3})(-3)+(\irrep{6},\irrepbar{3})(-3)+(\irrepbar{3},\irrep{6})(3)+(\irrep{8},\irrep{3})(-1)+(\irrep{3},\irrep{8})(1)+(\irrep{8},\irrepbar{6})(-1)+(\irrepbar{6},\irrep{8})(1)$\\
\irrep[1]{210} & = & $(\irrep{3},\irrepbar{6})(0)+(\irrepbar{6},\irrep{3})(0)+(\irrep{8},\irrepbar{3})(-2)+(\irrepbar{3},\irrep{8})(2)+(\irrepbar{6},\irrepbar{6})(0)+(\irrepbar{10},\irrepbar{3})(-2)+(\irrepbar{3},\irrepbar{10})(2)+(\irrepbar{15},\irrep{1})(-4)+(\irrep{1},\irrepbar{15})(4)$\\
\irrep{252} & = & $(\irrepbar{10},\irrepbar{6})(-1)+(\irrepbar{6},\irrepbar{10})(1)+(\irrepbar[1]{15},\irrepbar{3})(-3)+(\irrepbar{3},\irrepbar[1]{15})(3)+(\irrep{21},\irrep{1})(-5)+(\irrep{1},\irrep{21})(5)$\\
\irrep{280} & = & $(\irrep{3},\irrepbar{3})(2)+(\irrepbar{3},\irrep{3})(-2)+(\irrep{8},\irrep{1})(0)+(\irrep{1},\irrep{8})(0)+(\irrep{6},\irrep{3})(4)+(\irrep{6},\irrep{3})(-2)+(\irrep{3},\irrep{6})(2)+(\irrep{3},\irrep{6})(-4)+(\irrep{10},\irrep{1})(0)+(\irrep{1},\irrep{10})(0)+(\irrep{8},\irrep{8})(0)+(\irrep{15},\irrepbar{3})(2)+(\irrepbar{3},\irrep{15})(-2)$\\
\irrep{315} & = & $(\irrep{3},\irrep{3})(0)+(\irrep{6},\irrep{1})(2)+(\irrep{1},\irrep{6})(-2)+(\irrep{10},\irrepbar{3})(4)+(\irrepbar{3},\irrep{10})(-4)+(\irrep{6},\irrep{8})(2)+(\irrep{8},\irrep{6})(-2)+(\irrep{15},\irrep{3})(0)+(\irrep{3},\irrep{15})(0)+(\irrepbar{24},\irrep{1})(2)+(\irrep{1},\irrepbar{24})(-2)$\\
\bottomrule
\end{longtable}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SU7BranchingRules}SU(7) Branching Rules}\\
\endfirsthead
\caption[]{SU(7) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SU(7)& $\to$ &SU(6)${\times}$U(1)\\
\midrule
\irrep{7} & = & $(\irrep{1})(-6)+(\irrep{6})(1)$\\
\irrep{21} & = & $(\irrep{6})(-5)+(\irrep{15})(2)$\\
\irrep{28} & = & $(\irrep{1})(-12)+(\irrep{6})(-5)+(\irrep{21})(2)$\\
\irrep{35} & = & $(\irrep{15})(-4)+(\irrep{20})(3)$\\
\irrep{48} & = & $(\irrep{1})(0)+(\irrep{6})(7)+(\irrepbar{6})(-7)+(\irrep{35})(0)$\\
\irrep{84} & = & $(\irrep{1})(-18)+(\irrep{6})(-11)+(\irrep{21})(-4)+(\irrep{56})(3)$\\
\irrep{112} & = & $(\irrep{6})(-11)+(\irrep{15})(-4)+(\irrep{21})(-4)+(\irrep{70})(3)$\\
\irrep{140} & = & $(\irrep{6})(1)+(\irrep{15})(8)+(\irrep{35})(-6)+(\irrep{84})(1)$\\
\irrep{189} & = & $(\irrep{1})(-6)+(\irrep{6})(1)+(\irrepbar{6})(-13)+(\irrep{21})(8)+(\irrep{35})(-6)+(\irrep{120})(1)$\\
\irrep{196} & = & $(\irrepbar{21})(10)+(\irrepbar{70})(3)+(\irrep[1]{105})(-4)$\\
\irrep{210} & = & $(\irrepbar{15})(10)+(\irrep{20})(3)+(\irrepbar{70})(3)+(\irrep{105})(-4)$\\
\irrep[1]{210} & = & $(\irrep{1})(24)+(\irrepbar{6})(17)+(\irrepbar{21})(10)+(\irrepbar{56})(3)+(\irrep{126})(-4)$\\
\irrep{224} & = & $(\irrep{15})(2)+(\irrep{20})(9)+(\irrep{84})(-5)+(\irrep{105})(2)$\\
\irrep{378} & = & $(\irrepbar{6})(17)+(\irrepbar{15})(10)+(\irrepbar{21})(10)+(\irrepbar{56})(3)+(\irrepbar{70})(3)+(\irrep[1]{210})(-4)$\\
\irrep{392} & = & $(\irrep{35})(0)+(\irrep{84})(7)+(\irrepbar{84})(-7)+(\irrep{189})(0)$\\
\irrep{462} & = & $(\irrep{1})(30)+(\irrepbar{6})(23)+(\irrepbar{21})(16)+(\irrepbar{56})(9)+(\irrep{126})(2)+(\irrep{252})(-5)$\\
\irrep{490} & = & $(\irrepbar{70})(9)+(\irrep{105})(2)+(\irrep[1]{105})(2)+(\irrep{210})(-5)$\\
\irrep[1]{490} & = & $(\irrep[1]{105})(8)+(\irrep{175})(-6)+(\irrep{210})(1)$\\
\irrep{540} & = & $(\irrepbar{6})(-7)+(\irrepbar{15})(-14)+(\irrep{35})(0)+(\irrepbar{84})(-7)+(\irrep{120})(7)+(\irrep{280})(0)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(7)& $\to$ &SU(5)${\times}$SU(2)${\times}$U(1)\\
\midrule
\irrep{7} & = & $(\irrep{1},\irrep{2})(-5)+(\irrep{5},\irrep{1})(2)$\\
\irrep{21} & = & $(\irrep{1},\irrep{1})(-10)+(\irrep{5},\irrep{2})(-3)+(\irrep{10},\irrep{1})(4)$\\
\irrep{28} & = & $(\irrep{1},\irrep{3})(-10)+(\irrep{5},\irrep{2})(-3)+(\irrep{15},\irrep{1})(4)$\\
\irrep{35} & = & $(\irrep{5},\irrep{1})(-8)+(\irrepbar{10},\irrep{1})(6)+(\irrep{10},\irrep{2})(-1)$\\
\irrep{48} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{3})(0)+(\irrep{5},\irrep{2})(7)+(\irrepbar{5},\irrep{2})(-7)+(\irrep{24},\irrep{1})(0)$\\
\irrep{84} & = & $(\irrep{1},\irrep{4})(-15)+(\irrep{5},\irrep{3})(-8)+(\irrep{15},\irrep{2})(-1)+(\irrepbar{35},\irrep{1})(6)$\\
\irrep{112} & = & $(\irrep{1},\irrep{2})(-15)+(\irrep{5},\irrep{1})(-8)+(\irrep{5},\irrep{3})(-8)+(\irrep{10},\irrep{2})(-1)+(\irrep{15},\irrep{2})(-1)+(\irrepbar{40},\irrep{1})(6)$\\
\irrep{140} & = & $(\irrep{1},\irrep{2})(-5)+(\irrep{5},\irrep{1})(2)+(\irrepbar{5},\irrep{1})(-12)+(\irrep{5},\irrep{3})(2)+(\irrep{10},\irrep{2})(9)+(\irrep{24},\irrep{2})(-5)+(\irrep{45},\irrep{1})(2)$\\
\irrep{189} & = & $(\irrep{1},\irrep{2})(-5)+(\irrep{1},\irrep{4})(-5)+(\irrep{5},\irrep{1})(2)+(\irrep{5},\irrep{3})(2)+(\irrepbar{5},\irrep{3})(-12)+(\irrep{15},\irrep{2})(9)+(\irrep{24},\irrep{2})(-5)+(\irrep{70},\irrep{1})(2)$\\
\irrep{196} & = & $(\irrep{1},\irrep{1})(20)+(\irrepbar{5},\irrep{2})(13)+(\irrepbar{10},\irrep{1})(6)+(\irrepbar{15},\irrep{3})(6)+(\irrep{40},\irrep{2})(-1)+(\irrep{50},\irrep{1})(-8)$\\
\irrep{210} & = & $(\irrepbar{5},\irrep{2})(13)+(\irrepbar{10},\irrep{1})(6)+(\irrep{10},\irrep{2})(-1)+(\irrepbar{10},\irrep{3})(6)+(\irrepbar{15},\irrep{1})(6)+(\irrep{40},\irrep{2})(-1)+(\irrep{45},\irrep{1})(-8)$\\
\irrep[1]{210} & = & $(\irrep{1},\irrep{5})(20)+(\irrepbar{5},\irrep{4})(13)+(\irrepbar{15},\irrep{3})(6)+(\irrep{35},\irrep{2})(-1)+(\irrep[1]{70},\irrep{1})(-8)$\\
\irrep{224} & = & $(\irrep{5},\irrep{2})(-3)+(\irrep{10},\irrep{1})(4)+(\irrepbar{10},\irrep{2})(11)+(\irrep{10},\irrep{3})(4)+(\irrep{24},\irrep{1})(-10)+(\irrep{40},\irrep{1})(4)+(\irrep{45},\irrep{2})(-3)$\\
\irrep{378} & = & $(\irrep{1},\irrep{3})(20)+(\irrepbar{5},\irrep{2})(13)+(\irrepbar{5},\irrep{4})(13)+(\irrepbar{10},\irrep{3})(6)+(\irrepbar{15},\irrep{1})(6)+(\irrepbar{15},\irrep{3})(6)+(\irrep{35},\irrep{2})(-1)+(\irrep{40},\irrep{2})(-1)+(\irrep{105},\irrep{1})(-8)$\\
\irrep{392} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{5},\irrep{2})(7)+(\irrepbar{5},\irrep{2})(-7)+(\irrep{10},\irrep{1})(14)+(\irrepbar{10},\irrep{1})(-14)+(\irrep{24},\irrep{1})(0)+(\irrep{24},\irrep{3})(0)+(\irrep{45},\irrep{2})(7)+(\irrepbar{45},\irrep{2})(-7)+(\irrep{75},\irrep{1})(0)$\\
\irrep{462} & = & $(\irrep{1},\irrep{6})(25)+(\irrepbar{5},\irrep{5})(18)+(\irrepbar{15},\irrep{4})(11)+(\irrep{35},\irrep{3})(4)+(\irrep[1]{70},\irrep{2})(-3)+(\irrepbar[1]{126},\irrep{1})(-10)$\\
\irrep{490} & = & $(\irrepbar{5},\irrep{1})(18)+(\irrep{10},\irrep{1})(4)+(\irrepbar{10},\irrep{2})(11)+(\irrepbar{15},\irrep{2})(11)+(\irrep{40},\irrep{1})(4)+(\irrep{40},\irrep{3})(4)+(\irrep{45},\irrep{2})(-3)+(\irrep{50},\irrep{2})(-3)+(\irrep{75},\irrep{1})(-10)$\\
\irrep[1]{490} & = & $(\irrepbar{15},\irrep{1})(16)+(\irrep{40},\irrep{2})(9)+(\irrep{45},\irrep{1})(2)+(\irrepbar{50},\irrep{1})(-12)+(\irrep{50},\irrep{3})(2)+(\irrep{75},\irrep{2})(-5)$\\
\irrep{540} & = & $(\irrep{1},\irrep{3})(0)+(\irrep{5},\irrep{2})(7)+(\irrepbar{5},\irrep{2})(-7)+(\irrepbar{5},\irrep{4})(-7)+(\irrepbar{10},\irrep{3})(-14)+(\irrep{15},\irrep{1})(14)+(\irrep{24},\irrep{1})(0)+(\irrep{24},\irrep{3})(0)+(\irrepbar{45},\irrep{2})(-7)+(\irrep{70},\irrep{2})(7)+(\irrep{126},\irrep{1})(0)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\newpage
\toprule
\rowcolor{tableheadcolor}
SU(7)& $\to$ &SU(4)${\times}$SU(3)${\times}$U(1)\\
\midrule
\irrep{7} & = & $(\irrep{1},\irrep{3})(-4)+(\irrep{4},\irrep{1})(3)$\\
\irrep{21} & = & $(\irrep{1},\irrepbar{3})(-8)+(\irrep{4},\irrep{3})(-1)+(\irrep{6},\irrep{1})(6)$\\
\irrep{28} & = & $(\irrep{4},\irrep{3})(-1)+(\irrep{1},\irrep{6})(-8)+(\irrep{10},\irrep{1})(6)$\\
\irrep{35} & = & $(\irrep{1},\irrep{1})(-12)+(\irrepbar{4},\irrep{1})(9)+(\irrep{4},\irrepbar{3})(-5)+(\irrep{6},\irrep{3})(2)$\\
\irrep{48} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{4},\irrepbar{3})(7)+(\irrepbar{4},\irrep{3})(-7)+(\irrep{1},\irrep{8})(0)+(\irrep{15},\irrep{1})(0)$\\
\irrep{84} & = & $(\irrep{4},\irrep{6})(-5)+(\irrep{1},\irrep{10})(-12)+(\irrep{10},\irrep{3})(2)+(\irrepbar[2]{20},\irrep{1})(9)$\\
\irrep{112} & = & $(\irrep{4},\irrepbar{3})(-5)+(\irrep{6},\irrep{3})(2)+(\irrep{1},\irrep{8})(-12)+(\irrep{4},\irrep{6})(-5)+(\irrep{10},\irrep{3})(2)+(\irrepbar{20},\irrep{1})(9)$\\
\irrep{140} & = & $(\irrep{1},\irrep{3})(-4)+(\irrep{4},\irrep{1})(3)+(\irrepbar{4},\irrepbar{3})(-11)+(\irrep{1},\irrepbar{6})(-4)+(\irrep{6},\irrepbar{3})(10)+(\irrep{4},\irrep{8})(3)+(\irrep{15},\irrep{3})(-4)+(\irrep{20},\irrep{1})(3)$\\
\irrep{189} & = & $(\irrep{1},\irrep{3})(-4)+(\irrep{4},\irrep{1})(3)+(\irrepbar{4},\irrep{6})(-11)+(\irrep{4},\irrep{8})(3)+(\irrep{10},\irrepbar{3})(10)+(\irrep{1},\irrep{15})(-4)+(\irrep{15},\irrep{3})(-4)+(\irrep{36},\irrep{1})(3)$\\
\irrep{196} & = & $(\irrep{1},\irrep{6})(16)+(\irrep{6},\irrep{3})(2)+(\irrepbar{4},\irrep{8})(9)+(\irrepbar{10},\irrepbar{6})(2)+(\irrep[1]{20},\irrep{1})(-12)+(\irrep{20},\irrepbar{3})(-5)$\\
\irrep{210} & = & $(\irrep{1},\irrepbar{3})(16)+(\irrepbar{4},\irrep{1})(9)+(\irrep{4},\irrepbar{3})(-5)+(\irrep{6},\irrep{3})(2)+(\irrep{6},\irrepbar{6})(2)+(\irrepbar{4},\irrep{8})(9)+(\irrepbar{10},\irrep{3})(2)+(\irrep{15},\irrep{1})(-12)+(\irrep{20},\irrepbar{3})(-5)$\\
\irrep[1]{210} & = & $(\irrepbar{4},\irrepbar{10})(9)+(\irrepbar{10},\irrepbar{6})(2)+(\irrep{1},\irrepbar[1]{15})(16)+(\irrep[2]{20},\irrepbar{3})(-5)+(\irrepbar{35},\irrep{1})(-12)$\\
\irrep{224} & = & $(\irrep{1},\irrepbar{3})(-8)+(\irrepbar{4},\irrep{1})(-15)+(\irrep{4},\irrep{3})(-1)+(\irrep{6},\irrep{1})(6)+(\irrepbar{4},\irrepbar{3})(13)+(\irrep{4},\irrepbar{6})(-1)+(\irrepbar{10},\irrep{1})(6)+(\irrep{6},\irrep{8})(6)+(\irrep{15},\irrepbar{3})(-8)+(\irrep{20},\irrep{3})(-1)$\\
\irrep{378} & = & $(\irrep{6},\irrepbar{6})(2)+(\irrepbar{4},\irrep{8})(9)+(\irrepbar{10},\irrep{3})(2)+(\irrepbar{4},\irrepbar{10})(9)+(\irrepbar{10},\irrepbar{6})(2)+(\irrep{1},\irrepbar{15})(16)+(\irrep{20},\irrepbar{3})(-5)+(\irrep[2]{20},\irrepbar{3})(-5)+(\irrepbar{45},\irrep{1})(-12)$\\
\irrep{392} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{4},\irrepbar{3})(7)+(\irrepbar{4},\irrep{3})(-7)+(\irrep{6},\irrep{3})(14)+(\irrep{6},\irrepbar{3})(-14)+(\irrep{1},\irrep{8})(0)+(\irrep{4},\irrep{6})(7)+(\irrepbar{4},\irrepbar{6})(-7)+(\irrep{15},\irrep{1})(0)+(\irrep[1]{20},\irrep{1})(0)+(\irrep{15},\irrep{8})(0)+(\irrep{20},\irrepbar{3})(7)+(\irrepbar{20},\irrep{3})(-7)$\\
\irrep{462} & = & $(\irrepbar{4},\irrepbar[1]{15})(13)+(\irrepbar{10},\irrepbar{10})(6)+(\irrep{1},\irrep{21})(20)+(\irrep[2]{20},\irrepbar{6})(-1)+(\irrepbar{35},\irrepbar{3})(-8)+(\irrepbar{56},\irrep{1})(-15)$\\
\irrep{490} & = & $(\irrep{1},\irrep{3})(20)+(\irrep{4},\irrep{3})(-1)+(\irrep{6},\irrep{1})(6)+(\irrepbar{4},\irrepbar{3})(13)+(\irrepbar{4},\irrep{6})(13)+(\irrep{6},\irrep{8})(6)+(\irrep{15},\irrepbar{3})(-8)+(\irrepbar{10},\irrep{8})(6)+(\irrepbar{20},\irrep{1})(-15)+(\irrep{20},\irrep{3})(-1)+(\irrep[1]{20},\irrepbar{3})(-8)+(\irrep{20},\irrepbar{6})(-1)$\\
\irrep[1]{490} & = & $(\irrep{1},\irrep{1})(24)+(\irrep{4},\irrep{1})(3)+(\irrepbar{4},\irrep{3})(17)+(\irrep{6},\irrepbar{3})(10)+(\irrep{10},\irrep{1})(-18)+(\irrepbar{10},\irrep{6})(10)+(\irrep{15},\irrep{3})(-4)+(\irrepbar{20},\irrepbar{3})(-11)+(\irrep[1]{20},\irrepbar{6})(-4)+(\irrep{20},\irrep{8})(3)$\\
\irrep{540} & = & $(\irrep{4},\irrepbar{3})(7)+(\irrepbar{4},\irrep{3})(-7)+(\irrep{1},\irrep{8})(0)+(\irrep{4},\irrep{6})(7)+(\irrep{1},\irrep{10})(0)+(\irrep{6},\irrep{6})(-14)+(\irrep{10},\irrep{3})(14)+(\irrep{15},\irrep{1})(0)+(\irrepbar{4},\irrep{15})(-7)+(\irrep{15},\irrep{8})(0)+(\irrepbar{20},\irrep{3})(-7)+(\irrep{36},\irrepbar{3})(7)+(\irrep{45},\irrep{1})(0)$\\
\bottomrule
\end{longtable}
\newpage
{
\renewcommand{\arraystretch}{0.98}
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SU8BranchingRules}SU(8) Branching Rules}\\
\endfirsthead
\caption[]{SU(8) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SU(8)& $\to$ &SU(7)${\times}$U(1)\\
\midrule
\irrep{8} & = & $(\irrep{1})(-7)+(\irrep{7})(1)$\\
\irrep{28} & = & $(\irrep{7})(-6)+(\irrep{21})(2)$\\
\irrep{36} & = & $(\irrep{1})(-14)+(\irrep{7})(-6)+(\irrep{28})(2)$\\
\irrep{56} & = & $(\irrep{21})(-5)+(\irrep{35})(3)$\\
\irrep{63} & = & $(\irrep{1})(0)+(\irrep{7})(8)+(\irrepbar{7})(-8)+(\irrep{48})(0)$\\
\irrep{70} & = & $(\irrep{35})(-4)+(\irrepbar{35})(4)$\\
\irrep{120} & = & $(\irrep{1})(-21)+(\irrep{7})(-13)+(\irrep{28})(-5)+(\irrep{84})(3)$\\
\irrep{168} & = & $(\irrep{7})(-13)+(\irrep{21})(-5)+(\irrep{28})(-5)+(\irrep{112})(3)$\\
\irrep{216} & = & $(\irrep{7})(1)+(\irrep{21})(9)+(\irrep{48})(-7)+(\irrep{140})(1)$\\
\irrep{280} & = & $(\irrep{1})(-7)+(\irrep{7})(1)+(\irrepbar{7})(-15)+(\irrep{28})(9)+(\irrep{48})(-7)+(\irrep{189})(1)$\\
\irrep{330} & = & $(\irrep{1})(-28)+(\irrep{7})(-20)+(\irrep{28})(-12)+(\irrep{84})(-4)+(\irrepbar[1]{210})(4)$\\
\irrep{336} & = & $(\irrep{28})(-12)+(\irrep{112})(-4)+(\irrepbar{196})(4)$\\
\irrep{378} & = & $(\irrep{21})(-12)+(\irrep{35})(-4)+(\irrep{112})(-4)+(\irrepbar{210})(4)$\\
\irrep{420} & = & $(\irrep{21})(2)+(\irrep{35})(10)+(\irrep{140})(-6)+(\irrep{224})(2)$\\
\irrep{504} & = & $(\irrep{35})(3)+(\irrepbar{35})(11)+(\irrep{210})(3)+(\irrep{224})(-5)$\\
\irrep{630} & = & $(\irrep{7})(-20)+(\irrep{21})(-12)+(\irrep{28})(-12)+(\irrep{84})(-4)+(\irrep{112})(-4)+(\irrepbar{378})(4)$\\
\irrep{720} & = & $(\irrep{48})(0)+(\irrep{140})(8)+(\irrepbar{140})(-8)+(\irrep{392})(0)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(8)& $\to$ &SU(6)${\times}$SU(2)${\times}$U(1)\\
\midrule
\irrep{8} & = & $(\irrep{1},\irrep{2})(-3)+(\irrep{6},\irrep{1})(1)$\\
\irrep{28} & = & $(\irrep{1},\irrep{1})(-6)+(\irrep{6},\irrep{2})(-2)+(\irrep{15},\irrep{1})(2)$\\
\irrep{36} & = & $(\irrep{1},\irrep{3})(-6)+(\irrep{6},\irrep{2})(-2)+(\irrep{21},\irrep{1})(2)$\\
\irrep{56} & = & $(\irrep{6},\irrep{1})(-5)+(\irrep{15},\irrep{2})(-1)+(\irrep{20},\irrep{1})(3)$\\
\irrep{63} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{3})(0)+(\irrep{6},\irrep{2})(4)+(\irrepbar{6},\irrep{2})(-4)+(\irrep{35},\irrep{1})(0)$\\
\irrep{70} & = & $(\irrep{15},\irrep{1})(-4)+(\irrepbar{15},\irrep{1})(4)+(\irrep{20},\irrep{2})(0)$\\
\irrep{120} & = & $(\irrep{1},\irrep{4})(-9)+(\irrep{6},\irrep{3})(-5)+(\irrep{21},\irrep{2})(-1)+(\irrep{56},\irrep{1})(3)$\\
\irrep{168} & = & $(\irrep{1},\irrep{2})(-9)+(\irrep{6},\irrep{1})(-5)+(\irrep{6},\irrep{3})(-5)+(\irrep{15},\irrep{2})(-1)+(\irrep{21},\irrep{2})(-1)+(\irrep{70},\irrep{1})(3)$\\
\irrep{216} & = & $(\irrep{1},\irrep{2})(-3)+(\irrep{6},\irrep{1})(1)+(\irrepbar{6},\irrep{1})(-7)+(\irrep{6},\irrep{3})(1)+(\irrep{15},\irrep{2})(5)+(\irrep{35},\irrep{2})(-3)+(\irrep{84},\irrep{1})(1)$\\
\irrep{280} & = & $(\irrep{1},\irrep{2})(-3)+(\irrep{1},\irrep{4})(-3)+(\irrep{6},\irrep{1})(1)+(\irrep{6},\irrep{3})(1)+(\irrepbar{6},\irrep{3})(-7)+(\irrep{21},\irrep{2})(5)+(\irrep{35},\irrep{2})(-3)+(\irrep{120},\irrep{1})(1)$\\
\irrep{330} & = & $(\irrep{1},\irrep{5})(-12)+(\irrep{6},\irrep{4})(-8)+(\irrep{21},\irrep{3})(-4)+(\irrep{56},\irrep{2})(0)+(\irrepbar{126},\irrep{1})(4)$\\
\irrep{336} & = & $(\irrep{1},\irrep{1})(-12)+(\irrep{6},\irrep{2})(-8)+(\irrep{15},\irrep{1})(-4)+(\irrep{21},\irrep{3})(-4)+(\irrep{70},\irrep{2})(0)+(\irrepbar[1]{105},\irrep{1})(4)$\\
\irrep{378} & = & $(\irrep{6},\irrep{2})(-8)+(\irrep{15},\irrep{1})(-4)+(\irrep{15},\irrep{3})(-4)+(\irrep{20},\irrep{2})(0)+(\irrep{21},\irrep{1})(-4)+(\irrep{70},\irrep{2})(0)+(\irrepbar{105},\irrep{1})(4)$\\
\irrep{420} & = & $(\irrep{6},\irrep{2})(-2)+(\irrep{15},\irrep{1})(2)+(\irrep{15},\irrep{3})(2)+(\irrep{20},\irrep{2})(6)+(\irrep{35},\irrep{1})(-6)+(\irrep{84},\irrep{2})(-2)+(\irrep{105},\irrep{1})(2)$\\
\irrep{504} & = & $(\irrep{15},\irrep{2})(-1)+(\irrepbar{15},\irrep{2})(7)+(\irrep{20},\irrep{1})(3)+(\irrep{20},\irrep{3})(3)+(\irrepbar{70},\irrep{1})(3)+(\irrep{84},\irrep{1})(-5)+(\irrep{105},\irrep{2})(-1)$\\
\irrep{630} & = & $(\irrep{1},\irrep{3})(-12)+(\irrep{6},\irrep{2})(-8)+(\irrep{6},\irrep{4})(-8)+(\irrep{15},\irrep{3})(-4)+(\irrep{21},\irrep{1})(-4)+(\irrep{21},\irrep{3})(-4)+(\irrep{56},\irrep{2})(0)+(\irrep{70},\irrep{2})(0)+(\irrepbar[1]{210},\irrep{1})(4)$\\
\irrep{720} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{6},\irrep{2})(4)+(\irrepbar{6},\irrep{2})(-4)+(\irrep{15},\irrep{1})(8)+(\irrepbar{15},\irrep{1})(-8)+(\irrep{35},\irrep{1})(0)+(\irrep{35},\irrep{3})(0)+(\irrep{84},\irrep{2})(4)+(\irrepbar{84},\irrep{2})(-4)+(\irrep{189},\irrep{1})(0)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\newpage
\toprule
\rowcolor{tableheadcolor}
SU(8)& $\to$ &SU(5)${\times}$SU(3)${\times}$U(1)\\
\midrule
\irrep{8} & = & $(\irrep{1},\irrep{3})(-5)+(\irrep{5},\irrep{1})(3)$\\
\irrep{28} & = & $(\irrep{1},\irrepbar{3})(-10)+(\irrep{5},\irrep{3})(-2)+(\irrep{10},\irrep{1})(6)$\\
\irrep{36} & = & $(\irrep{1},\irrep{6})(-10)+(\irrep{5},\irrep{3})(-2)+(\irrep{15},\irrep{1})(6)$\\
\irrep{56} & = & $(\irrep{1},\irrep{1})(-15)+(\irrep{5},\irrepbar{3})(-7)+(\irrepbar{10},\irrep{1})(9)+(\irrep{10},\irrep{3})(1)$\\
\irrep{63} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{5},\irrepbar{3})(8)+(\irrepbar{5},\irrep{3})(-8)+(\irrep{1},\irrep{8})(0)+(\irrep{24},\irrep{1})(0)$\\
\irrep{70} & = & $(\irrep{5},\irrep{1})(-12)+(\irrepbar{5},\irrep{1})(12)+(\irrep{10},\irrepbar{3})(-4)+(\irrepbar{10},\irrep{3})(4)$\\
\irrep{120} & = & $(\irrep{5},\irrep{6})(-7)+(\irrep{1},\irrep{10})(-15)+(\irrep{15},\irrep{3})(1)+(\irrepbar{35},\irrep{1})(9)$\\
\irrep{168} & = & $(\irrep{5},\irrepbar{3})(-7)+(\irrep{1},\irrep{8})(-15)+(\irrep{5},\irrep{6})(-7)+(\irrep{10},\irrep{3})(1)+(\irrep{15},\irrep{3})(1)+(\irrepbar{40},\irrep{1})(9)$\\
\irrep{216} & = & $(\irrep{1},\irrep{3})(-5)+(\irrep{5},\irrep{1})(3)+(\irrep{1},\irrepbar{6})(-5)+(\irrepbar{5},\irrepbar{3})(-13)+(\irrep{10},\irrepbar{3})(11)+(\irrep{5},\irrep{8})(3)+(\irrep{24},\irrep{3})(-5)+(\irrep{45},\irrep{1})(3)$\\
\irrep{280} & = & $(\irrep{1},\irrep{3})(-5)+(\irrep{5},\irrep{1})(3)+(\irrepbar{5},\irrep{6})(-13)+(\irrep{5},\irrep{8})(3)+(\irrep{1},\irrep{15})(-5)+(\irrep{15},\irrepbar{3})(11)+(\irrep{24},\irrep{3})(-5)+(\irrep{70},\irrep{1})(3)$\\
\irrep{330} & = & $(\irrep{5},\irrep{10})(-12)+(\irrep{1},\irrep[1]{15})(-20)+(\irrep{15},\irrep{6})(-4)+(\irrepbar{35},\irrep{3})(4)+(\irrepbar[1]{70},\irrep{1})(12)$\\
\irrep{336} & = & $(\irrep{1},\irrepbar{6})(-20)+(\irrep{10},\irrepbar{3})(-4)+(\irrep{5},\irrep{8})(-12)+(\irrep{15},\irrep{6})(-4)+(\irrepbar{40},\irrep{3})(4)+(\irrepbar{50},\irrep{1})(12)$\\
\irrep{378} & = & $(\irrep{1},\irrep{3})(-20)+(\irrep{5},\irrep{1})(-12)+(\irrep{10},\irrepbar{3})(-4)+(\irrepbar{10},\irrep{3})(4)+(\irrep{5},\irrep{8})(-12)+(\irrep{10},\irrep{6})(-4)+(\irrep{15},\irrepbar{3})(-4)+(\irrepbar{40},\irrep{3})(4)+(\irrepbar{45},\irrep{1})(12)$\\
\irrep{420} & = & $(\irrep{1},\irrepbar{3})(-10)+(\irrepbar{5},\irrep{1})(-18)+(\irrep{5},\irrep{3})(-2)+(\irrep{10},\irrep{1})(6)+(\irrep{5},\irrepbar{6})(-2)+(\irrepbar{10},\irrepbar{3})(14)+(\irrep{10},\irrep{8})(6)+(\irrep{24},\irrepbar{3})(-10)+(\irrep{40},\irrep{1})(6)+(\irrep{45},\irrep{3})(-2)$\\
\irrep{504} & = & $(\irrep{5},\irrepbar{3})(-7)+(\irrepbar{5},\irrepbar{3})(17)+(\irrepbar{10},\irrep{1})(9)+(\irrep{10},\irrep{3})(1)+(\irrepbar{15},\irrep{1})(9)+(\irrep{10},\irrepbar{6})(1)+(\irrepbar{10},\irrep{8})(9)+(\irrep{24},\irrep{1})(-15)+(\irrep{40},\irrep{3})(1)+(\irrep{45},\irrepbar{3})(-7)$\\
\irrep{630} & = & $(\irrep{5},\irrep{8})(-12)+(\irrep{5},\irrep{10})(-12)+(\irrep{10},\irrep{6})(-4)+(\irrep{1},\irrep{15})(-20)+(\irrep{15},\irrepbar{3})(-4)+(\irrep{15},\irrep{6})(-4)+(\irrepbar{35},\irrep{3})(4)+(\irrepbar{40},\irrep{3})(4)+(\irrepbar{105},\irrep{1})(12)$\\
\irrep{720} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{5},\irrepbar{3})(8)+(\irrepbar{5},\irrep{3})(-8)+(\irrep{1},\irrep{8})(0)+(\irrep{5},\irrep{6})(8)+(\irrepbar{5},\irrepbar{6})(-8)+(\irrep{10},\irrep{3})(16)+(\irrepbar{10},\irrepbar{3})(-16)+(\irrep{24},\irrep{1})(0)+(\irrep{24},\irrep{8})(0)+(\irrep{45},\irrepbar{3})(8)+(\irrepbar{45},\irrep{3})(-8)+(\irrep{75},\irrep{1})(0)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(8)& $\to$ &SU(4)${\times}$SU(4)${\times}$U(1)\\
\midrule
\irrep{8} & = & $(\irrep{4},\irrep{1})(1)+(\irrep{1},\irrep{4})(-1)$\\
\irrep{28} & = & $(\irrep{6},\irrep{1})(2)+(\irrep{1},\irrep{6})(-2)+(\irrep{4},\irrep{4})(0)$\\
\irrep{36} & = & $(\irrep{4},\irrep{4})(0)+(\irrep{10},\irrep{1})(2)+(\irrep{1},\irrep{10})(-2)$\\
\irrep{56} & = & $(\irrepbar{4},\irrep{1})(3)+(\irrep{1},\irrepbar{4})(-3)+(\irrep{4},\irrep{6})(-1)+(\irrep{6},\irrep{4})(1)$\\
\irrep{63} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{4},\irrepbar{4})(2)+(\irrepbar{4},\irrep{4})(-2)+(\irrep{15},\irrep{1})(0)+(\irrep{1},\irrep{15})(0)$\\
\irrep{70} & = & $(\irrep{1},\irrep{1})(4)+(\irrep{1},\irrep{1})(-4)+(\irrep{4},\irrepbar{4})(-2)+(\irrepbar{4},\irrep{4})(2)+(\irrep{6},\irrep{6})(0)$\\
\irrep{120} & = & $(\irrep{10},\irrep{4})(1)+(\irrep{4},\irrep{10})(-1)+(\irrepbar[2]{20},\irrep{1})(3)+(\irrep{1},\irrepbar[2]{20})(-3)$\\
\irrep{168} & = & $(\irrep{4},\irrep{6})(-1)+(\irrep{6},\irrep{4})(1)+(\irrep{10},\irrep{4})(1)+(\irrep{4},\irrep{10})(-1)+(\irrepbar{20},\irrep{1})(3)+(\irrep{1},\irrepbar{20})(-3)$\\
\irrep{216} & = & $(\irrep{4},\irrep{1})(1)+(\irrep{1},\irrep{4})(-1)+(\irrep{6},\irrepbar{4})(3)+(\irrepbar{4},\irrep{6})(-3)+(\irrep{15},\irrep{4})(-1)+(\irrep{4},\irrep{15})(1)+(\irrep{20},\irrep{1})(1)+(\irrep{1},\irrep{20})(-1)$\\
\irrep{280} & = & $(\irrep{4},\irrep{1})(1)+(\irrep{1},\irrep{4})(-1)+(\irrep{10},\irrepbar{4})(3)+(\irrepbar{4},\irrep{10})(-3)+(\irrep{15},\irrep{4})(-1)+(\irrep{4},\irrep{15})(1)+(\irrep{36},\irrep{1})(1)+(\irrep{1},\irrep{36})(-1)$\\
\irrep{330} & = & $(\irrep{10},\irrep{10})(0)+(\irrepbar[2]{20},\irrep{4})(2)+(\irrep{4},\irrepbar[2]{20})(-2)+(\irrep{35},\irrep{1})(4)+(\irrep{1},\irrep{35})(-4)$\\
\irrep{336} & = & $(\irrep{6},\irrep{6})(0)+(\irrep{10},\irrep{10})(0)+(\irrep[1]{20},\irrep{1})(4)+(\irrep{1},\irrep[1]{20})(-4)+(\irrepbar{20},\irrep{4})(2)+(\irrep{4},\irrepbar{20})(-2)$\\
\irrep{378} & = & $(\irrep{4},\irrepbar{4})(-2)+(\irrepbar{4},\irrep{4})(2)+(\irrep{6},\irrep{6})(0)+(\irrep{15},\irrep{1})(4)+(\irrep{1},\irrep{15})(-4)+(\irrep{10},\irrep{6})(0)+(\irrep{6},\irrep{10})(0)+(\irrepbar{20},\irrep{4})(2)+(\irrep{4},\irrepbar{20})(-2)$\\
\irrep{420} & = & $(\irrep{6},\irrep{1})(2)+(\irrep{1},\irrep{6})(-2)+(\irrep{4},\irrep{4})(0)+(\irrepbar{4},\irrepbar{4})(4)+(\irrepbar{4},\irrepbar{4})(-4)+(\irrepbar{10},\irrep{1})(2)+(\irrep{1},\irrepbar{10})(-2)+(\irrep{15},\irrep{6})(-2)+(\irrep{6},\irrep{15})(2)+(\irrep{4},\irrep{20})(0)+(\irrep{20},\irrep{4})(0)$\\
\irrep{504} & = & $(\irrepbar{4},\irrep{1})(3)+(\irrepbar{4},\irrep{1})(-5)+(\irrep{1},\irrepbar{4})(5)+(\irrep{1},\irrepbar{4})(-3)+(\irrep{4},\irrep{6})(-1)+(\irrep{6},\irrep{4})(1)+(\irrep{4},\irrepbar{10})(-1)+(\irrepbar{10},\irrep{4})(1)+(\irrep{15},\irrepbar{4})(-3)+(\irrepbar{4},\irrep{15})(3)+(\irrep{20},\irrep{6})(-1)+(\irrep{6},\irrep{20})(1)$\\
\irrep{630} & = & $(\irrep{10},\irrep{6})(0)+(\irrep{6},\irrep{10})(0)+(\irrep{10},\irrep{10})(0)+(\irrepbar{20},\irrep{4})(2)+(\irrep{4},\irrepbar{20})(-2)+(\irrepbar[2]{20},\irrep{4})(2)+(\irrep{4},\irrepbar[2]{20})(-2)+(\irrep{45},\irrep{1})(4)+(\irrep{1},\irrep{45})(-4)$\\
\irrep{720} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{4},\irrepbar{4})(2)+(\irrepbar{4},\irrep{4})(-2)+(\irrep{6},\irrep{6})(4)+(\irrep{6},\irrep{6})(-4)+(\irrep{15},\irrep{1})(0)+(\irrep{1},\irrep{15})(0)+(\irrep[1]{20},\irrep{1})(0)+(\irrep{1},\irrep[1]{20})(0)+(\irrepbar{20},\irrep{4})(-2)+(\irrep{4},\irrepbar{20})(2)+(\irrep{20},\irrepbar{4})(2)+(\irrepbar{4},\irrep{20})(-2)+(\irrep{15},\irrep{15})(0)$\\
\bottomrule
\end{longtable}
}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SU9BranchingRules}SU(9) Branching Rules}\\
\endfirsthead
\caption[]{SU(9) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SU(9)& $\to$ &SU(8)${\times}$U(1)\\
\midrule
\irrep{9} & = & $(\irrep{1})(-8)+(\irrep{8})(1)$\\
\irrep{36} & = & $(\irrep{8})(-7)+(\irrep{28})(2)$\\
\irrep{45} & = & $(\irrep{1})(-16)+(\irrep{8})(-7)+(\irrep{36})(2)$\\
\irrep{80} & = & $(\irrep{1})(0)+(\irrep{8})(9)+(\irrepbar{8})(-9)+(\irrep{63})(0)$\\
\irrep{84} & = & $(\irrep{28})(-6)+(\irrep{56})(3)$\\
\irrep{126} & = & $(\irrep{56})(-5)+(\irrep{70})(4)$\\
\irrep{165} & = & $(\irrep{1})(-24)+(\irrep{8})(-15)+(\irrep{36})(-6)+(\irrep{120})(3)$\\
\irrep{240} & = & $(\irrep{8})(-15)+(\irrep{28})(-6)+(\irrep{36})(-6)+(\irrep{168})(3)$\\
\irrep{315} & = & $(\irrep{8})(1)+(\irrep{28})(10)+(\irrep{63})(-8)+(\irrep{216})(1)$\\
\irrep{396} & = & $(\irrep{1})(-8)+(\irrep{8})(1)+(\irrepbar{8})(-17)+(\irrep{36})(10)+(\irrep{63})(-8)+(\irrep{280})(1)$\\
\irrep{495} & = & $(\irrep{1})(-32)+(\irrep{8})(-23)+(\irrep{36})(-14)+(\irrep{120})(-5)+(\irrep{330})(4)$\\
\irrep{540} & = & $(\irrep{36})(-14)+(\irrep{168})(-5)+(\irrep{336})(4)$\\
\irrep{630} & = & $(\irrep{28})(-14)+(\irrep{56})(-5)+(\irrep{168})(-5)+(\irrep{378})(4)$\\
\irrep{720} & = & $(\irrep{28})(2)+(\irrep{56})(11)+(\irrep{216})(-7)+(\irrep{420})(2)$\\
\irrep{990} & = & $(\irrep{8})(-23)+(\irrep{28})(-14)+(\irrep{36})(-14)+(\irrep{120})(-5)+(\irrep{168})(-5)+(\irrep{630})(4)$\\
\irrep{1008} & = & $(\irrepbar{56})(13)+(\irrep{70})(4)+(\irrepbar{378})(4)+(\irrep{504})(-5)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(9)& $\to$ &SU(7)${\times}$SU(2)${\times}$U(1)\\
\midrule
\irrep{9} & = & $(\irrep{1},\irrep{2})(-7)+(\irrep{7},\irrep{1})(2)$\\
\irrep{36} & = & $(\irrep{1},\irrep{1})(-14)+(\irrep{7},\irrep{2})(-5)+(\irrep{21},\irrep{1})(4)$\\
\irrep{45} & = & $(\irrep{1},\irrep{3})(-14)+(\irrep{7},\irrep{2})(-5)+(\irrep{28},\irrep{1})(4)$\\
\irrep{80} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{3})(0)+(\irrep{7},\irrep{2})(9)+(\irrepbar{7},\irrep{2})(-9)+(\irrep{48},\irrep{1})(0)$\\
\irrep{84} & = & $(\irrep{7},\irrep{1})(-12)+(\irrep{21},\irrep{2})(-3)+(\irrep{35},\irrep{1})(6)$\\
\irrep{126} & = & $(\irrep{21},\irrep{1})(-10)+(\irrepbar{35},\irrep{1})(8)+(\irrep{35},\irrep{2})(-1)$\\
\irrep{165} & = & $(\irrep{1},\irrep{4})(-21)+(\irrep{7},\irrep{3})(-12)+(\irrep{28},\irrep{2})(-3)+(\irrep{84},\irrep{1})(6)$\\
\irrep{240} & = & $(\irrep{1},\irrep{2})(-21)+(\irrep{7},\irrep{1})(-12)+(\irrep{7},\irrep{3})(-12)+(\irrep{21},\irrep{2})(-3)+(\irrep{28},\irrep{2})(-3)+(\irrep{112},\irrep{1})(6)$\\
\irrep{315} & = & $(\irrep{1},\irrep{2})(-7)+(\irrep{7},\irrep{1})(2)+(\irrepbar{7},\irrep{1})(-16)+(\irrep{7},\irrep{3})(2)+(\irrep{21},\irrep{2})(11)+(\irrep{48},\irrep{2})(-7)+(\irrep{140},\irrep{1})(2)$\\
\irrep{396} & = & $(\irrep{1},\irrep{2})(-7)+(\irrep{1},\irrep{4})(-7)+(\irrep{7},\irrep{1})(2)+(\irrep{7},\irrep{3})(2)+(\irrepbar{7},\irrep{3})(-16)+(\irrep{28},\irrep{2})(11)+(\irrep{48},\irrep{2})(-7)+(\irrep{189},\irrep{1})(2)$\\
\irrep{495} & = & $(\irrep{1},\irrep{5})(-28)+(\irrep{7},\irrep{4})(-19)+(\irrep{28},\irrep{3})(-10)+(\irrep{84},\irrep{2})(-1)+(\irrepbar[1]{210},\irrep{1})(8)$\\
\irrep{540} & = & $(\irrep{1},\irrep{1})(-28)+(\irrep{7},\irrep{2})(-19)+(\irrep{21},\irrep{1})(-10)+(\irrep{28},\irrep{3})(-10)+(\irrep{112},\irrep{2})(-1)+(\irrepbar{196},\irrep{1})(8)$\\
\irrep{630} & = & $(\irrep{7},\irrep{2})(-19)+(\irrep{21},\irrep{1})(-10)+(\irrep{21},\irrep{3})(-10)+(\irrep{28},\irrep{1})(-10)+(\irrep{35},\irrep{2})(-1)+(\irrep{112},\irrep{2})(-1)+(\irrepbar{210},\irrep{1})(8)$\\
\irrep{720} & = & $(\irrep{7},\irrep{2})(-5)+(\irrep{21},\irrep{1})(4)+(\irrep{21},\irrep{3})(4)+(\irrep{35},\irrep{2})(13)+(\irrep{48},\irrep{1})(-14)+(\irrep{140},\irrep{2})(-5)+(\irrep{224},\irrep{1})(4)$\\
\irrep{990} & = & $(\irrep{1},\irrep{3})(-28)+(\irrep{7},\irrep{2})(-19)+(\irrep{7},\irrep{4})(-19)+(\irrep{21},\irrep{3})(-10)+(\irrep{28},\irrep{1})(-10)+(\irrep{28},\irrep{3})(-10)+(\irrep{84},\irrep{2})(-1)+(\irrep{112},\irrep{2})(-1)+(\irrepbar{378},\irrep{1})(8)$\\
\irrep{1008} & = & $(\irrepbar{21},\irrep{2})(17)+(\irrepbar{35},\irrep{1})(8)+(\irrep{35},\irrep{2})(-1)+(\irrepbar{35},\irrep{3})(8)+(\irrepbar{112},\irrep{1})(8)+(\irrep{210},\irrep{2})(-1)+(\irrep{224},\irrep{1})(-10)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\newpage
\toprule
\rowcolor{tableheadcolor}
SU(9)& $\to$ &SU(6)${\times}$SU(3)${\times}$U(1)\\
\midrule
\irrep{9} & = & $(\irrep{1},\irrep{3})(-2)+(\irrep{6},\irrep{1})(1)$\\
\irrep{36} & = & $(\irrep{1},\irrepbar{3})(-4)+(\irrep{6},\irrep{3})(-1)+(\irrep{15},\irrep{1})(2)$\\
\irrep{45} & = & $(\irrep{1},\irrep{6})(-4)+(\irrep{6},\irrep{3})(-1)+(\irrep{21},\irrep{1})(2)$\\
\irrep{80} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{6},\irrepbar{3})(3)+(\irrepbar{6},\irrep{3})(-3)+(\irrep{1},\irrep{8})(0)+(\irrep{35},\irrep{1})(0)$\\
\irrep{84} & = & $(\irrep{1},\irrep{1})(-6)+(\irrep{6},\irrepbar{3})(-3)+(\irrep{15},\irrep{3})(0)+(\irrep{20},\irrep{1})(3)$\\
\irrep{126} & = & $(\irrep{6},\irrep{1})(-5)+(\irrepbar{15},\irrep{1})(4)+(\irrep{15},\irrepbar{3})(-2)+(\irrep{20},\irrep{3})(1)$\\
\irrep{165} & = & $(\irrep{1},\irrep{10})(-6)+(\irrep{6},\irrep{6})(-3)+(\irrep{21},\irrep{3})(0)+(\irrep{56},\irrep{1})(3)$\\
\irrep{240} & = & $(\irrep{6},\irrepbar{3})(-3)+(\irrep{1},\irrep{8})(-6)+(\irrep{6},\irrep{6})(-3)+(\irrep{15},\irrep{3})(0)+(\irrep{21},\irrep{3})(0)+(\irrep{70},\irrep{1})(3)$\\
\irrep{315} & = & $(\irrep{1},\irrep{3})(-2)+(\irrep{6},\irrep{1})(1)+(\irrep{1},\irrepbar{6})(-2)+(\irrepbar{6},\irrepbar{3})(-5)+(\irrep{6},\irrep{8})(1)+(\irrep{15},\irrepbar{3})(4)+(\irrep{35},\irrep{3})(-2)+(\irrep{84},\irrep{1})(1)$\\
\irrep{396} & = & $(\irrep{1},\irrep{3})(-2)+(\irrep{6},\irrep{1})(1)+(\irrepbar{6},\irrep{6})(-5)+(\irrep{6},\irrep{8})(1)+(\irrep{1},\irrep{15})(-2)+(\irrep{21},\irrepbar{3})(4)+(\irrep{35},\irrep{3})(-2)+(\irrep{120},\irrep{1})(1)$\\
\irrep{495} & = & $(\irrep{6},\irrep{10})(-5)+(\irrep{1},\irrep[1]{15})(-8)+(\irrep{21},\irrep{6})(-2)+(\irrep{56},\irrep{3})(1)+(\irrepbar{126},\irrep{1})(4)$\\
\irrep{540} & = & $(\irrep{1},\irrepbar{6})(-8)+(\irrep{6},\irrep{8})(-5)+(\irrep{15},\irrepbar{3})(-2)+(\irrep{21},\irrep{6})(-2)+(\irrep{70},\irrep{3})(1)+(\irrepbar[1]{105},\irrep{1})(4)$\\
\irrep{630} & = & $(\irrep{1},\irrep{3})(-8)+(\irrep{6},\irrep{1})(-5)+(\irrep{6},\irrep{8})(-5)+(\irrep{15},\irrepbar{3})(-2)+(\irrep{15},\irrep{6})(-2)+(\irrep{20},\irrep{3})(1)+(\irrep{21},\irrepbar{3})(-2)+(\irrep{70},\irrep{3})(1)+(\irrepbar{105},\irrep{1})(4)$\\
\irrep{720} & = & $(\irrep{1},\irrepbar{3})(-4)+(\irrepbar{6},\irrep{1})(-7)+(\irrep{6},\irrep{3})(-1)+(\irrep{6},\irrepbar{6})(-1)+(\irrep{15},\irrep{1})(2)+(\irrep{20},\irrepbar{3})(5)+(\irrep{15},\irrep{8})(2)+(\irrep{35},\irrepbar{3})(-4)+(\irrep{84},\irrep{3})(-1)+(\irrep{105},\irrep{1})(2)$\\
\irrep{990} & = & $(\irrep{6},\irrep{8})(-5)+(\irrep{6},\irrep{10})(-5)+(\irrep{1},\irrep{15})(-8)+(\irrep{15},\irrep{6})(-2)+(\irrep{21},\irrepbar{3})(-2)+(\irrep{21},\irrep{6})(-2)+(\irrep{56},\irrep{3})(1)+(\irrep{70},\irrep{3})(1)+(\irrepbar[1]{210},\irrep{1})(4)$\\
\irrep{1008} & = & $(\irrepbar{6},\irrepbar{3})(7)+(\irrepbar{15},\irrep{1})(4)+(\irrep{15},\irrepbar{3})(-2)+(\irrepbar{21},\irrep{1})(4)+(\irrep{20},\irrep{3})(1)+(\irrepbar{15},\irrep{8})(4)+(\irrep{20},\irrepbar{6})(1)+(\irrepbar{70},\irrep{3})(1)+(\irrep{84},\irrep{1})(-5)+(\irrep{105},\irrepbar{3})(-2)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(9)& $\to$ &SU(5)${\times}$SU(4)${\times}$U(1)\\
\midrule
\irrep{9} & = & $(\irrep{1},\irrep{4})(-5)+(\irrep{5},\irrep{1})(4)$\\
\irrep{36} & = & $(\irrep{1},\irrep{6})(-10)+(\irrep{5},\irrep{4})(-1)+(\irrep{10},\irrep{1})(8)$\\
\irrep{45} & = & $(\irrep{5},\irrep{4})(-1)+(\irrep{1},\irrep{10})(-10)+(\irrep{15},\irrep{1})(8)$\\
\irrep{80} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{5},\irrepbar{4})(9)+(\irrepbar{5},\irrep{4})(-9)+(\irrep{1},\irrep{15})(0)+(\irrep{24},\irrep{1})(0)$\\
\irrep{84} & = & $(\irrep{1},\irrepbar{4})(-15)+(\irrep{5},\irrep{6})(-6)+(\irrepbar{10},\irrep{1})(12)+(\irrep{10},\irrep{4})(3)$\\
\irrep{126} & = & $(\irrep{1},\irrep{1})(-20)+(\irrepbar{5},\irrep{1})(16)+(\irrep{5},\irrepbar{4})(-11)+(\irrepbar{10},\irrep{4})(7)+(\irrep{10},\irrep{6})(-2)$\\
\irrep{165} & = & $(\irrep{5},\irrep{10})(-6)+(\irrep{15},\irrep{4})(3)+(\irrep{1},\irrepbar[2]{20})(-15)+(\irrepbar{35},\irrep{1})(12)$\\
\irrep{240} & = & $(\irrep{5},\irrep{6})(-6)+(\irrep{10},\irrep{4})(3)+(\irrep{5},\irrep{10})(-6)+(\irrep{15},\irrep{4})(3)+(\irrep{1},\irrepbar{20})(-15)+(\irrepbar{40},\irrep{1})(12)$\\
\irrep{315} & = & $(\irrep{1},\irrep{4})(-5)+(\irrep{5},\irrep{1})(4)+(\irrepbar{5},\irrep{6})(-14)+(\irrep{10},\irrepbar{4})(13)+(\irrep{5},\irrep{15})(4)+(\irrep{1},\irrep{20})(-5)+(\irrep{24},\irrep{4})(-5)+(\irrep{45},\irrep{1})(4)$\\
\irrep{396} & = & $(\irrep{1},\irrep{4})(-5)+(\irrep{5},\irrep{1})(4)+(\irrepbar{5},\irrep{10})(-14)+(\irrep{15},\irrepbar{4})(13)+(\irrep{5},\irrep{15})(4)+(\irrep{24},\irrep{4})(-5)+(\irrep{1},\irrep{36})(-5)+(\irrep{70},\irrep{1})(4)$\\
\irrep{495} & = & $(\irrep{15},\irrep{10})(-2)+(\irrep{5},\irrepbar[2]{20})(-11)+(\irrep{1},\irrep{35})(-20)+(\irrepbar{35},\irrep{4})(7)+(\irrepbar[1]{70},\irrep{1})(16)$\\
\irrep{540} & = & $(\irrep{10},\irrep{6})(-2)+(\irrep{1},\irrep[1]{20})(-20)+(\irrep{15},\irrep{10})(-2)+(\irrep{5},\irrepbar{20})(-11)+(\irrepbar{40},\irrep{4})(7)+(\irrepbar{50},\irrep{1})(16)$\\
\irrep{630} & = & $(\irrep{5},\irrepbar{4})(-11)+(\irrepbar{10},\irrep{4})(7)+(\irrep{10},\irrep{6})(-2)+(\irrep{1},\irrep{15})(-20)+(\irrep{10},\irrep{10})(-2)+(\irrep{15},\irrep{6})(-2)+(\irrep{5},\irrepbar{20})(-11)+(\irrepbar{40},\irrep{4})(7)+(\irrepbar{45},\irrep{1})(16)$\\
\irrep{720} & = & $(\irrep{1},\irrep{6})(-10)+(\irrep{5},\irrep{4})(-1)+(\irrepbar{5},\irrepbar{4})(-19)+(\irrep{10},\irrep{1})(8)+(\irrep{1},\irrepbar{10})(-10)+(\irrepbar{10},\irrepbar{4})(17)+(\irrep{10},\irrep{15})(8)+(\irrep{5},\irrep{20})(-1)+(\irrep{24},\irrep{6})(-10)+(\irrep{40},\irrep{1})(8)+(\irrep{45},\irrep{4})(-1)$\\
\irrep{990} & = & $(\irrep{10},\irrep{10})(-2)+(\irrep{15},\irrep{6})(-2)+(\irrep{15},\irrep{10})(-2)+(\irrep{5},\irrepbar{20})(-11)+(\irrep{5},\irrepbar[2]{20})(-11)+(\irrepbar{35},\irrep{4})(7)+(\irrepbar{40},\irrep{4})(7)+(\irrep{1},\irrep{45})(-20)+(\irrepbar{105},\irrep{1})(16)$\\
\irrep{1008} & = & $(\irrep{1},\irrepbar{4})(25)+(\irrepbar{5},\irrep{1})(16)+(\irrep{5},\irrepbar{4})(-11)+(\irrepbar{10},\irrep{4})(7)+(\irrep{10},\irrep{6})(-2)+(\irrepbar{15},\irrep{4})(7)+(\irrep{10},\irrepbar{10})(-2)+(\irrepbar{5},\irrep{15})(16)+(\irrep{24},\irrep{1})(-20)+(\irrepbar{10},\irrep{20})(7)+(\irrep{40},\irrep{6})(-2)+(\irrep{45},\irrepbar{4})(-11)$\\
\bottomrule
\end{longtable}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SU10BranchingRules}SU(10) Branching Rules}\\
\endfirsthead
\caption[]{SU(10) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SU(10)& $\to$ &SU(9)${\times}$U(1)\\
\midrule
\irrep{10} & = & $(\irrep{1})(-9)+(\irrep{9})(1)$\\
\irrep{45} & = & $(\irrep{9})(-8)+(\irrep{36})(2)$\\
\irrep{55} & = & $(\irrep{1})(-18)+(\irrep{9})(-8)+(\irrep{45})(2)$\\
\irrep{99} & = & $(\irrep{1})(0)+(\irrep{9})(10)+(\irrepbar{9})(-10)+(\irrep{80})(0)$\\
\irrep{120} & = & $(\irrep{36})(-7)+(\irrep{84})(3)$\\
\irrep{210} & = & $(\irrep{84})(-6)+(\irrep{126})(4)$\\
\irrep{220} & = & $(\irrep{1})(-27)+(\irrep{9})(-17)+(\irrep{45})(-7)+(\irrep{165})(3)$\\
\irrep{252} & = & $(\irrep{126})(-5)+(\irrepbar{126})(5)$\\
\irrep{330} & = & $(\irrep{9})(-17)+(\irrep{36})(-7)+(\irrep{45})(-7)+(\irrep{240})(3)$\\
\irrep{440} & = & $(\irrep{9})(1)+(\irrep{36})(11)+(\irrep{80})(-9)+(\irrep{315})(1)$\\
\irrep{540} & = & $(\irrep{1})(-9)+(\irrep{9})(1)+(\irrepbar{9})(-19)+(\irrep{45})(11)+(\irrep{80})(-9)+(\irrep{396})(1)$\\
\irrep{715} & = & $(\irrep{1})(-36)+(\irrep{9})(-26)+(\irrep{45})(-16)+(\irrep{165})(-6)+(\irrep{495})(4)$\\
\irrep{825} & = & $(\irrep{45})(-16)+(\irrep{240})(-6)+(\irrep{540})(4)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(10)& $\to$ &SU(8)${\times}$SU(2)${\times}$U(1)\\
\midrule
\irrep{10} & = & $(\irrep{1},\irrep{2})(-4)+(\irrep{8},\irrep{1})(1)$\\
\irrep{45} & = & $(\irrep{1},\irrep{1})(-8)+(\irrep{8},\irrep{2})(-3)+(\irrep{28},\irrep{1})(2)$\\
\irrep{55} & = & $(\irrep{1},\irrep{3})(-8)+(\irrep{8},\irrep{2})(-3)+(\irrep{36},\irrep{1})(2)$\\
\irrep{99} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{3})(0)+(\irrep{8},\irrep{2})(5)+(\irrepbar{8},\irrep{2})(-5)+(\irrep{63},\irrep{1})(0)$\\
\irrep{120} & = & $(\irrep{8},\irrep{1})(-7)+(\irrep{28},\irrep{2})(-2)+(\irrep{56},\irrep{1})(3)$\\
\irrep{210} & = & $(\irrep{28},\irrep{1})(-6)+(\irrep{56},\irrep{2})(-1)+(\irrep{70},\irrep{1})(4)$\\
\irrep{220} & = & $(\irrep{1},\irrep{4})(-12)+(\irrep{8},\irrep{3})(-7)+(\irrep{36},\irrep{2})(-2)+(\irrep{120},\irrep{1})(3)$\\
\irrep{252} & = & $(\irrep{56},\irrep{1})(-5)+(\irrepbar{56},\irrep{1})(5)+(\irrep{70},\irrep{2})(0)$\\
\irrep{330} & = & $(\irrep{1},\irrep{2})(-12)+(\irrep{8},\irrep{1})(-7)+(\irrep{8},\irrep{3})(-7)+(\irrep{28},\irrep{2})(-2)+(\irrep{36},\irrep{2})(-2)+(\irrep{168},\irrep{1})(3)$\\
\irrep{440} & = & $(\irrep{1},\irrep{2})(-4)+(\irrep{8},\irrep{1})(1)+(\irrepbar{8},\irrep{1})(-9)+(\irrep{8},\irrep{3})(1)+(\irrep{28},\irrep{2})(6)+(\irrep{63},\irrep{2})(-4)+(\irrep{216},\irrep{1})(1)$\\
\irrep{540} & = & $(\irrep{1},\irrep{2})(-4)+(\irrep{1},\irrep{4})(-4)+(\irrep{8},\irrep{1})(1)+(\irrep{8},\irrep{3})(1)+(\irrepbar{8},\irrep{3})(-9)+(\irrep{36},\irrep{2})(6)+(\irrep{63},\irrep{2})(-4)+(\irrep{280},\irrep{1})(1)$\\
\irrep{715} & = & $(\irrep{1},\irrep{5})(-16)+(\irrep{8},\irrep{4})(-11)+(\irrep{36},\irrep{3})(-6)+(\irrep{120},\irrep{2})(-1)+(\irrep{330},\irrep{1})(4)$\\
\irrep{825} & = & $(\irrep{1},\irrep{1})(-16)+(\irrep{8},\irrep{2})(-11)+(\irrep{28},\irrep{1})(-6)+(\irrep{36},\irrep{3})(-6)+(\irrep{168},\irrep{2})(-1)+(\irrep{336},\irrep{1})(4)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(10)& $\to$ &SU(7)${\times}$SU(3)${\times}$U(1)\\
\midrule
\irrep{10} & = & $(\irrep{1},\irrep{3})(-7)+(\irrep{7},\irrep{1})(3)$\\
\irrep{45} & = & $(\irrep{1},\irrepbar{3})(-14)+(\irrep{7},\irrep{3})(-4)+(\irrep{21},\irrep{1})(6)$\\
\irrep{55} & = & $(\irrep{1},\irrep{6})(-14)+(\irrep{7},\irrep{3})(-4)+(\irrep{28},\irrep{1})(6)$\\
\irrep{99} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{8})(0)+(\irrep{7},\irrepbar{3})(10)+(\irrepbar{7},\irrep{3})(-10)+(\irrep{48},\irrep{1})(0)$\\
\irrep{120} & = & $(\irrep{1},\irrep{1})(-21)+(\irrep{7},\irrepbar{3})(-11)+(\irrep{21},\irrep{3})(-1)+(\irrep{35},\irrep{1})(9)$\\
\irrep{210} & = & $(\irrep{7},\irrep{1})(-18)+(\irrep{21},\irrepbar{3})(-8)+(\irrepbar{35},\irrep{1})(12)+(\irrep{35},\irrep{3})(2)$\\
\irrep{220} & = & $(\irrep{1},\irrep{10})(-21)+(\irrep{7},\irrep{6})(-11)+(\irrep{28},\irrep{3})(-1)+(\irrep{84},\irrep{1})(9)$\\
\irrep{252} & = & $(\irrep{21},\irrep{1})(-15)+(\irrepbar{21},\irrep{1})(15)+(\irrep{35},\irrepbar{3})(-5)+(\irrepbar{35},\irrep{3})(5)$\\
\irrep{330} & = & $(\irrep{1},\irrep{8})(-21)+(\irrep{7},\irrepbar{3})(-11)+(\irrep{7},\irrep{6})(-11)+(\irrep{21},\irrep{3})(-1)+(\irrep{28},\irrep{3})(-1)+(\irrep{112},\irrep{1})(9)$\\
\irrep{440} & = & $(\irrep{1},\irrep{3})(-7)+(\irrep{1},\irrepbar{6})(-7)+(\irrep{7},\irrep{1})(3)+(\irrepbar{7},\irrepbar{3})(-17)+(\irrep{7},\irrep{8})(3)+(\irrep{21},\irrepbar{3})(13)+(\irrep{48},\irrep{3})(-7)+(\irrep{140},\irrep{1})(3)$\\
\irrep{540} & = & $(\irrep{1},\irrep{3})(-7)+(\irrep{7},\irrep{1})(3)+(\irrepbar{7},\irrep{6})(-17)+(\irrep{7},\irrep{8})(3)+(\irrep{1},\irrep{15})(-7)+(\irrep{28},\irrepbar{3})(13)+(\irrep{48},\irrep{3})(-7)+(\irrep{189},\irrep{1})(3)$\\
\irrep{715} & = & $(\irrep{1},\irrep[1]{15})(-28)+(\irrep{7},\irrep{10})(-18)+(\irrep{28},\irrep{6})(-8)+(\irrep{84},\irrep{3})(2)+(\irrepbar[1]{210},\irrep{1})(12)$\\
\irrep{825} & = & $(\irrep{1},\irrepbar{6})(-28)+(\irrep{7},\irrep{8})(-18)+(\irrep{21},\irrepbar{3})(-8)+(\irrep{28},\irrep{6})(-8)+(\irrep{112},\irrep{3})(2)+(\irrepbar{196},\irrep{1})(12)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(10)& $\to$ &SU(6)${\times}$SU(4)${\times}$U(1)\\
\midrule
\irrep{10} & = & $(\irrep{1},\irrep{4})(-3)+(\irrep{6},\irrep{1})(2)$\\
\irrep{45} & = & $(\irrep{1},\irrep{6})(-6)+(\irrep{6},\irrep{4})(-1)+(\irrep{15},\irrep{1})(4)$\\
\irrep{55} & = & $(\irrep{6},\irrep{4})(-1)+(\irrep{1},\irrep{10})(-6)+(\irrep{21},\irrep{1})(4)$\\
\irrep{99} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{6},\irrepbar{4})(5)+(\irrepbar{6},\irrep{4})(-5)+(\irrep{1},\irrep{15})(0)+(\irrep{35},\irrep{1})(0)$\\
\irrep{120} & = & $(\irrep{1},\irrepbar{4})(-9)+(\irrep{6},\irrep{6})(-4)+(\irrep{15},\irrep{4})(1)+(\irrep{20},\irrep{1})(6)$\\
\irrep{210} & = & $(\irrep{1},\irrep{1})(-12)+(\irrep{6},\irrepbar{4})(-7)+(\irrepbar{15},\irrep{1})(8)+(\irrep{15},\irrep{6})(-2)+(\irrep{20},\irrep{4})(3)$\\
\irrep{220} & = & $(\irrep{6},\irrep{10})(-4)+(\irrep{1},\irrepbar[2]{20})(-9)+(\irrep{21},\irrep{4})(1)+(\irrep{56},\irrep{1})(6)$\\
\irrep{252} & = & $(\irrep{6},\irrep{1})(-10)+(\irrepbar{6},\irrep{1})(10)+(\irrep{15},\irrepbar{4})(-5)+(\irrepbar{15},\irrep{4})(5)+(\irrep{20},\irrep{6})(0)$\\
\irrep{330} & = & $(\irrep{6},\irrep{6})(-4)+(\irrep{6},\irrep{10})(-4)+(\irrep{15},\irrep{4})(1)+(\irrep{1},\irrepbar{20})(-9)+(\irrep{21},\irrep{4})(1)+(\irrep{70},\irrep{1})(6)$\\
\irrep{440} & = & $(\irrep{1},\irrep{4})(-3)+(\irrep{6},\irrep{1})(2)+(\irrepbar{6},\irrep{6})(-8)+(\irrep{15},\irrepbar{4})(7)+(\irrep{6},\irrep{15})(2)+(\irrep{1},\irrep{20})(-3)+(\irrep{35},\irrep{4})(-3)+(\irrep{84},\irrep{1})(2)$\\
\irrep{540} & = & $(\irrep{1},\irrep{4})(-3)+(\irrep{6},\irrep{1})(2)+(\irrepbar{6},\irrep{10})(-8)+(\irrep{6},\irrep{15})(2)+(\irrep{21},\irrepbar{4})(7)+(\irrep{1},\irrep{36})(-3)+(\irrep{35},\irrep{4})(-3)+(\irrep{120},\irrep{1})(2)$\\
\irrep{715} & = & $(\irrep{6},\irrepbar[2]{20})(-7)+(\irrep{21},\irrep{10})(-2)+(\irrep{1},\irrep{35})(-12)+(\irrep{56},\irrep{4})(3)+(\irrepbar{126},\irrep{1})(8)$\\
\irrep{825} & = & $(\irrep{15},\irrep{6})(-2)+(\irrep{1},\irrep[1]{20})(-12)+(\irrep{6},\irrepbar{20})(-7)+(\irrep{21},\irrep{10})(-2)+(\irrep{70},\irrep{4})(3)+(\irrepbar[1]{105},\irrep{1})(8)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(10)& $\to$ &SU(5)${\times}$SU(5)${\times}$U(1)\\
\midrule
\irrep{10} & = & $(\irrep{5},\irrep{1})(1)+(\irrep{1},\irrep{5})(-1)$\\
\irrep{45} & = & $(\irrep{5},\irrep{5})(0)+(\irrep{10},\irrep{1})(2)+(\irrep{1},\irrep{10})(-2)$\\
\irrep{55} & = & $(\irrep{5},\irrep{5})(0)+(\irrep{15},\irrep{1})(2)+(\irrep{1},\irrep{15})(-2)$\\
\irrep{99} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{5},\irrepbar{5})(2)+(\irrepbar{5},\irrep{5})(-2)+(\irrep{24},\irrep{1})(0)+(\irrep{1},\irrep{24})(0)$\\
\irrep{120} & = & $(\irrepbar{10},\irrep{1})(3)+(\irrep{1},\irrepbar{10})(-3)+(\irrep{5},\irrep{10})(-1)+(\irrep{10},\irrep{5})(1)$\\
\irrep{210} & = & $(\irrepbar{5},\irrep{1})(4)+(\irrep{1},\irrepbar{5})(-4)+(\irrep{5},\irrepbar{10})(-2)+(\irrepbar{10},\irrep{5})(2)+(\irrep{10},\irrep{10})(0)$\\
\irrep{220} & = & $(\irrep{15},\irrep{5})(1)+(\irrep{5},\irrep{15})(-1)+(\irrepbar{35},\irrep{1})(3)+(\irrep{1},\irrepbar{35})(-3)$\\
\irrep{252} & = & $(\irrep{1},\irrep{1})(5)+(\irrep{1},\irrep{1})(-5)+(\irrep{5},\irrepbar{5})(-3)+(\irrepbar{5},\irrep{5})(3)+(\irrep{10},\irrepbar{10})(-1)+(\irrepbar{10},\irrep{10})(1)$\\
\irrep{330} & = & $(\irrep{5},\irrep{10})(-1)+(\irrep{10},\irrep{5})(1)+(\irrep{15},\irrep{5})(1)+(\irrep{5},\irrep{15})(-1)+(\irrepbar{40},\irrep{1})(3)+(\irrep{1},\irrepbar{40})(-3)$\\
\irrep{440} & = & $(\irrep{5},\irrep{1})(1)+(\irrep{1},\irrep{5})(-1)+(\irrep{10},\irrepbar{5})(3)+(\irrepbar{5},\irrep{10})(-3)+(\irrep{24},\irrep{5})(-1)+(\irrep{5},\irrep{24})(1)+(\irrep{45},\irrep{1})(1)+(\irrep{1},\irrep{45})(-1)$\\
\irrep{540} & = & $(\irrep{5},\irrep{1})(1)+(\irrep{1},\irrep{5})(-1)+(\irrep{15},\irrepbar{5})(3)+(\irrepbar{5},\irrep{15})(-3)+(\irrep{24},\irrep{5})(-1)+(\irrep{5},\irrep{24})(1)+(\irrep{70},\irrep{1})(1)+(\irrep{1},\irrep{70})(-1)$\\
\irrep{715} & = & $(\irrep{15},\irrep{15})(0)+(\irrepbar{35},\irrep{5})(2)+(\irrep{5},\irrepbar{35})(-2)+(\irrepbar[1]{70},\irrep{1})(4)+(\irrep{1},\irrepbar[1]{70})(-4)$\\
\irrep{825} & = & $(\irrep{10},\irrep{10})(0)+(\irrep{15},\irrep{15})(0)+(\irrepbar{40},\irrep{5})(2)+(\irrep{5},\irrepbar{40})(-2)+(\irrepbar{50},\irrep{1})(4)+(\irrep{1},\irrepbar{50})(-4)$\\
\bottomrule
\end{longtable}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SU11BranchingRules}SU(11) Branching Rules}\\
\endfirsthead
\caption[]{SU(11) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SU(11)& $\to$ &SU(10)${\times}$U(1)\\
\midrule
\irrep{11} & = & $(\irrep{1})(-10)+(\irrep{10})(1)$\\
\irrep{55} & = & $(\irrep{10})(-9)+(\irrep{45})(2)$\\
\irrep{66} & = & $(\irrep{1})(-20)+(\irrep{10})(-9)+(\irrep{55})(2)$\\
\irrep{120} & = & $(\irrep{1})(0)+(\irrep{10})(11)+(\irrepbar{10})(-11)+(\irrep{99})(0)$\\
\irrep{165} & = & $(\irrep{45})(-8)+(\irrep{120})(3)$\\
\irrep{286} & = & $(\irrep{1})(-30)+(\irrep{10})(-19)+(\irrep{55})(-8)+(\irrep{220})(3)$\\
\irrep{330} & = & $(\irrep{120})(-7)+(\irrep{210})(4)$\\
\irrep{440} & = & $(\irrep{10})(-19)+(\irrep{45})(-8)+(\irrep{55})(-8)+(\irrep{330})(3)$\\
\irrep{462} & = & $(\irrep{210})(-6)+(\irrep{252})(5)$\\
\irrep{594} & = & $(\irrep{10})(1)+(\irrep{45})(12)+(\irrep{99})(-10)+(\irrep{440})(1)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(11)& $\to$ &SU(9)${\times}$SU(2)${\times}$U(1)\\
\midrule
\irrep{11} & = & $(\irrep{1},\irrep{2})(-9)+(\irrep{9},\irrep{1})(2)$\\
\irrep{55} & = & $(\irrep{1},\irrep{1})(-18)+(\irrep{9},\irrep{2})(-7)+(\irrep{36},\irrep{1})(4)$\\
\irrep{66} & = & $(\irrep{1},\irrep{3})(-18)+(\irrep{9},\irrep{2})(-7)+(\irrep{45},\irrep{1})(4)$\\
\irrep{120} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{3})(0)+(\irrep{9},\irrep{2})(11)+(\irrepbar{9},\irrep{2})(-11)+(\irrep{80},\irrep{1})(0)$\\
\irrep{165} & = & $(\irrep{9},\irrep{1})(-16)+(\irrep{36},\irrep{2})(-5)+(\irrep{84},\irrep{1})(6)$\\
\irrep{286} & = & $(\irrep{1},\irrep{4})(-27)+(\irrep{9},\irrep{3})(-16)+(\irrep{45},\irrep{2})(-5)+(\irrep{165},\irrep{1})(6)$\\
\irrep{330} & = & $(\irrep{36},\irrep{1})(-14)+(\irrep{84},\irrep{2})(-3)+(\irrep{126},\irrep{1})(8)$\\
\irrep{440} & = & $(\irrep{1},\irrep{2})(-27)+(\irrep{9},\irrep{1})(-16)+(\irrep{9},\irrep{3})(-16)+(\irrep{36},\irrep{2})(-5)+(\irrep{45},\irrep{2})(-5)+(\irrep{240},\irrep{1})(6)$\\
\irrep{462} & = & $(\irrep{84},\irrep{1})(-12)+(\irrepbar{126},\irrep{1})(10)+(\irrep{126},\irrep{2})(-1)$\\
\irrep{594} & = & $(\irrep{1},\irrep{2})(-9)+(\irrep{9},\irrep{1})(2)+(\irrepbar{9},\irrep{1})(-20)+(\irrep{9},\irrep{3})(2)+(\irrep{36},\irrep{2})(13)+(\irrep{80},\irrep{2})(-9)+(\irrep{315},\irrep{1})(2)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(11)& $\to$ &SU(8)${\times}$SU(3)${\times}$U(1)\\
\midrule
\irrep{11} & = & $(\irrep{1},\irrep{3})(-8)+(\irrep{8},\irrep{1})(3)$\\
\irrep{55} & = & $(\irrep{1},\irrepbar{3})(-16)+(\irrep{8},\irrep{3})(-5)+(\irrep{28},\irrep{1})(6)$\\
\irrep{66} & = & $(\irrep{1},\irrep{6})(-16)+(\irrep{8},\irrep{3})(-5)+(\irrep{36},\irrep{1})(6)$\\
\irrep{120} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{8})(0)+(\irrep{8},\irrepbar{3})(11)+(\irrepbar{8},\irrep{3})(-11)+(\irrep{63},\irrep{1})(0)$\\
\irrep{165} & = & $(\irrep{1},\irrep{1})(-24)+(\irrep{8},\irrepbar{3})(-13)+(\irrep{28},\irrep{3})(-2)+(\irrep{56},\irrep{1})(9)$\\
\irrep{286} & = & $(\irrep{1},\irrep{10})(-24)+(\irrep{8},\irrep{6})(-13)+(\irrep{36},\irrep{3})(-2)+(\irrep{120},\irrep{1})(9)$\\
\irrep{330} & = & $(\irrep{8},\irrep{1})(-21)+(\irrep{28},\irrepbar{3})(-10)+(\irrep{56},\irrep{3})(1)+(\irrep{70},\irrep{1})(12)$\\
\irrep{440} & = & $(\irrep{1},\irrep{8})(-24)+(\irrep{8},\irrepbar{3})(-13)+(\irrep{8},\irrep{6})(-13)+(\irrep{28},\irrep{3})(-2)+(\irrep{36},\irrep{3})(-2)+(\irrep{168},\irrep{1})(9)$\\
\irrep{462} & = & $(\irrep{28},\irrep{1})(-18)+(\irrepbar{56},\irrep{1})(15)+(\irrep{56},\irrepbar{3})(-7)+(\irrep{70},\irrep{3})(4)$\\
\irrep{594} & = & $(\irrep{1},\irrep{3})(-8)+(\irrep{1},\irrepbar{6})(-8)+(\irrep{8},\irrep{1})(3)+(\irrepbar{8},\irrepbar{3})(-19)+(\irrep{8},\irrep{8})(3)+(\irrep{28},\irrepbar{3})(14)+(\irrep{63},\irrep{3})(-8)+(\irrep{216},\irrep{1})(3)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\newpage
\toprule
\rowcolor{tableheadcolor}
SU(11)& $\to$ &SU(7)${\times}$SU(4)${\times}$U(1)\\
\midrule
\irrep{11} & = & $(\irrep{1},\irrep{4})(-7)+(\irrep{7},\irrep{1})(4)$\\
\irrep{55} & = & $(\irrep{1},\irrep{6})(-14)+(\irrep{7},\irrep{4})(-3)+(\irrep{21},\irrep{1})(8)$\\
\irrep{66} & = & $(\irrep{7},\irrep{4})(-3)+(\irrep{1},\irrep{10})(-14)+(\irrep{28},\irrep{1})(8)$\\
\irrep{120} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{7},\irrepbar{4})(11)+(\irrepbar{7},\irrep{4})(-11)+(\irrep{1},\irrep{15})(0)+(\irrep{48},\irrep{1})(0)$\\
\irrep{165} & = & $(\irrep{1},\irrepbar{4})(-21)+(\irrep{7},\irrep{6})(-10)+(\irrep{21},\irrep{4})(1)+(\irrep{35},\irrep{1})(12)$\\
\irrep{286} & = & $(\irrep{7},\irrep{10})(-10)+(\irrep{1},\irrepbar[2]{20})(-21)+(\irrep{28},\irrep{4})(1)+(\irrep{84},\irrep{1})(12)$\\
\irrep{330} & = & $(\irrep{1},\irrep{1})(-28)+(\irrep{7},\irrepbar{4})(-17)+(\irrep{21},\irrep{6})(-6)+(\irrepbar{35},\irrep{1})(16)+(\irrep{35},\irrep{4})(5)$\\
\irrep{440} & = & $(\irrep{7},\irrep{6})(-10)+(\irrep{7},\irrep{10})(-10)+(\irrep{1},\irrepbar{20})(-21)+(\irrep{21},\irrep{4})(1)+(\irrep{28},\irrep{4})(1)+(\irrep{112},\irrep{1})(12)$\\
\irrep{462} & = & $(\irrep{7},\irrep{1})(-24)+(\irrepbar{21},\irrep{1})(20)+(\irrep{21},\irrepbar{4})(-13)+(\irrepbar{35},\irrep{4})(9)+(\irrep{35},\irrep{6})(-2)$\\
\irrep{594} & = & $(\irrep{1},\irrep{4})(-7)+(\irrep{7},\irrep{1})(4)+(\irrepbar{7},\irrep{6})(-18)+(\irrep{1},\irrep{20})(-7)+(\irrep{7},\irrep{15})(4)+(\irrep{21},\irrepbar{4})(15)+(\irrep{48},\irrep{4})(-7)+(\irrep{140},\irrep{1})(4)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(11)& $\to$ &SU(6)${\times}$SU(5)${\times}$U(1)\\
\midrule
\irrep{11} & = & $(\irrep{1},\irrep{5})(-6)+(\irrep{6},\irrep{1})(5)$\\
\irrep{55} & = & $(\irrep{6},\irrep{5})(-1)+(\irrep{1},\irrep{10})(-12)+(\irrep{15},\irrep{1})(10)$\\
\irrep{66} & = & $(\irrep{6},\irrep{5})(-1)+(\irrep{1},\irrep{15})(-12)+(\irrep{21},\irrep{1})(10)$\\
\irrep{120} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{6},\irrepbar{5})(11)+(\irrepbar{6},\irrep{5})(-11)+(\irrep{1},\irrep{24})(0)+(\irrep{35},\irrep{1})(0)$\\
\irrep{165} & = & $(\irrep{1},\irrepbar{10})(-18)+(\irrep{6},\irrep{10})(-7)+(\irrep{15},\irrep{5})(4)+(\irrep{20},\irrep{1})(15)$\\
\irrep{286} & = & $(\irrep{6},\irrep{15})(-7)+(\irrep{21},\irrep{5})(4)+(\irrep{1},\irrepbar{35})(-18)+(\irrep{56},\irrep{1})(15)$\\
\irrep{330} & = & $(\irrep{1},\irrepbar{5})(-24)+(\irrep{6},\irrepbar{10})(-13)+(\irrepbar{15},\irrep{1})(20)+(\irrep{15},\irrep{10})(-2)+(\irrep{20},\irrep{5})(9)$\\
\irrep{440} & = & $(\irrep{6},\irrep{10})(-7)+(\irrep{15},\irrep{5})(4)+(\irrep{6},\irrep{15})(-7)+(\irrep{21},\irrep{5})(4)+(\irrep{1},\irrepbar{40})(-18)+(\irrep{70},\irrep{1})(15)$\\
\irrep{462} & = & $(\irrep{1},\irrep{1})(-30)+(\irrepbar{6},\irrep{1})(25)+(\irrep{6},\irrepbar{5})(-19)+(\irrepbar{15},\irrep{5})(14)+(\irrep{15},\irrepbar{10})(-8)+(\irrep{20},\irrep{10})(3)$\\
\irrep{594} & = & $(\irrep{1},\irrep{5})(-6)+(\irrep{6},\irrep{1})(5)+(\irrepbar{6},\irrep{10})(-17)+(\irrep{15},\irrepbar{5})(16)+(\irrep{6},\irrep{24})(5)+(\irrep{35},\irrep{5})(-6)+(\irrep{1},\irrep{45})(-6)+(\irrep{84},\irrep{1})(5)$\\
\bottomrule
\end{longtable}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SU12BranchingRules}SU(12) Branching Rules}\\
\endfirsthead
\caption[]{SU(12) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SU(12)& $\to$ &SU(11)${\times}$U(1)\\
\midrule
\irrep{12} & = & $(\irrep{1})(-11)+(\irrep{11})(1)$\\
\irrep{66} & = & $(\irrep{11})(-10)+(\irrep{55})(2)$\\
\irrep{78} & = & $(\irrep{1})(-22)+(\irrep{11})(-10)+(\irrep{66})(2)$\\
\irrep{143} & = & $(\irrep{1})(0)+(\irrep{11})(12)+(\irrepbar{11})(-12)+(\irrep{120})(0)$\\
\irrep{220} & = & $(\irrep{55})(-9)+(\irrep{165})(3)$\\
\irrep{364} & = & $(\irrep{1})(-33)+(\irrep{11})(-21)+(\irrep{66})(-9)+(\irrep{286})(3)$\\
\irrep{495} & = & $(\irrep{165})(-8)+(\irrep{330})(4)$\\
\irrep{572} & = & $(\irrep{11})(-21)+(\irrep{55})(-9)+(\irrep{66})(-9)+(\irrep{440})(3)$\\
\irrep{780} & = & $(\irrep{11})(1)+(\irrep{55})(13)+(\irrep{120})(-11)+(\irrep{594})(1)$\\
\irrep{792} & = & $(\irrep{330})(-7)+(\irrep{462})(5)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(12)& $\to$ &SU(10)${\times}$SU(2)${\times}$U(1)\\
\midrule
\irrep{12} & = & $(\irrep{1},\irrep{2})(-5)+(\irrep{10},\irrep{1})(1)$\\
\irrep{66} & = & $(\irrep{1},\irrep{1})(-10)+(\irrep{10},\irrep{2})(-4)+(\irrep{45},\irrep{1})(2)$\\
\irrep{78} & = & $(\irrep{1},\irrep{3})(-10)+(\irrep{10},\irrep{2})(-4)+(\irrep{55},\irrep{1})(2)$\\
\irrep{143} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{3})(0)+(\irrep{10},\irrep{2})(6)+(\irrepbar{10},\irrep{2})(-6)+(\irrep{99},\irrep{1})(0)$\\
\irrep{220} & = & $(\irrep{10},\irrep{1})(-9)+(\irrep{45},\irrep{2})(-3)+(\irrep{120},\irrep{1})(3)$\\
\irrep{364} & = & $(\irrep{1},\irrep{4})(-15)+(\irrep{10},\irrep{3})(-9)+(\irrep{55},\irrep{2})(-3)+(\irrep{220},\irrep{1})(3)$\\
\irrep{495} & = & $(\irrep{45},\irrep{1})(-8)+(\irrep{120},\irrep{2})(-2)+(\irrep{210},\irrep{1})(4)$\\
\irrep{572} & = & $(\irrep{1},\irrep{2})(-15)+(\irrep{10},\irrep{1})(-9)+(\irrep{10},\irrep{3})(-9)+(\irrep{45},\irrep{2})(-3)+(\irrep{55},\irrep{2})(-3)+(\irrep{330},\irrep{1})(3)$\\
\irrep{780} & = & $(\irrep{1},\irrep{2})(-5)+(\irrep{10},\irrep{1})(1)+(\irrepbar{10},\irrep{1})(-11)+(\irrep{10},\irrep{3})(1)+(\irrep{45},\irrep{2})(7)+(\irrep{99},\irrep{2})(-5)+(\irrep{440},\irrep{1})(1)$\\
\irrep{792} & = & $(\irrep{120},\irrep{1})(-7)+(\irrep{210},\irrep{2})(-1)+(\irrep{252},\irrep{1})(5)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(12)& $\to$ &SU(9)${\times}$SU(3)${\times}$U(1)\\
\midrule
\irrep{12} & = & $(\irrep{1},\irrep{3})(-3)+(\irrep{9},\irrep{1})(1)$\\
\irrep{66} & = & $(\irrep{1},\irrepbar{3})(-6)+(\irrep{9},\irrep{3})(-2)+(\irrep{36},\irrep{1})(2)$\\
\irrep{78} & = & $(\irrep{1},\irrep{6})(-6)+(\irrep{9},\irrep{3})(-2)+(\irrep{45},\irrep{1})(2)$\\
\irrep{143} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{1},\irrep{8})(0)+(\irrep{9},\irrepbar{3})(4)+(\irrepbar{9},\irrep{3})(-4)+(\irrep{80},\irrep{1})(0)$\\
\irrep{220} & = & $(\irrep{1},\irrep{1})(-9)+(\irrep{9},\irrepbar{3})(-5)+(\irrep{36},\irrep{3})(-1)+(\irrep{84},\irrep{1})(3)$\\
\irrep{364} & = & $(\irrep{1},\irrep{10})(-9)+(\irrep{9},\irrep{6})(-5)+(\irrep{45},\irrep{3})(-1)+(\irrep{165},\irrep{1})(3)$\\
\irrep{495} & = & $(\irrep{9},\irrep{1})(-8)+(\irrep{36},\irrepbar{3})(-4)+(\irrep{84},\irrep{3})(0)+(\irrep{126},\irrep{1})(4)$\\
\irrep{572} & = & $(\irrep{1},\irrep{8})(-9)+(\irrep{9},\irrepbar{3})(-5)+(\irrep{9},\irrep{6})(-5)+(\irrep{36},\irrep{3})(-1)+(\irrep{45},\irrep{3})(-1)+(\irrep{240},\irrep{1})(3)$\\
\irrep{780} & = & $(\irrep{1},\irrep{3})(-3)+(\irrep{1},\irrepbar{6})(-3)+(\irrep{9},\irrep{1})(1)+(\irrepbar{9},\irrepbar{3})(-7)+(\irrep{9},\irrep{8})(1)+(\irrep{36},\irrepbar{3})(5)+(\irrep{80},\irrep{3})(-3)+(\irrep{315},\irrep{1})(1)$\\
\irrep{792} & = & $(\irrep{36},\irrep{1})(-7)+(\irrep{84},\irrepbar{3})(-3)+(\irrepbar{126},\irrep{1})(5)+(\irrep{126},\irrep{3})(1)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\newpage
\toprule
\rowcolor{tableheadcolor}
SU(12)& $\to$ &SU(7)${\times}$SU(5)${\times}$U(1)\\
\midrule
\irrep{12} & = & $(\irrep{1},\irrep{5})(-7)+(\irrep{7},\irrep{1})(5)$\\
\irrep{66} & = & $(\irrep{1},\irrep{10})(-14)+(\irrep{7},\irrep{5})(-2)+(\irrep{21},\irrep{1})(10)$\\
\irrep{78} & = & $(\irrep{7},\irrep{5})(-2)+(\irrep{1},\irrep{15})(-14)+(\irrep{28},\irrep{1})(10)$\\
\irrep{143} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{7},\irrepbar{5})(12)+(\irrepbar{7},\irrep{5})(-12)+(\irrep{1},\irrep{24})(0)+(\irrep{48},\irrep{1})(0)$\\
\irrep{220} & = & $(\irrep{1},\irrepbar{10})(-21)+(\irrep{7},\irrep{10})(-9)+(\irrep{21},\irrep{5})(3)+(\irrep{35},\irrep{1})(15)$\\
\irrep{364} & = & $(\irrep{7},\irrep{15})(-9)+(\irrep{28},\irrep{5})(3)+(\irrep{1},\irrepbar{35})(-21)+(\irrep{84},\irrep{1})(15)$\\
\irrep{495} & = & $(\irrep{1},\irrepbar{5})(-28)+(\irrep{7},\irrepbar{10})(-16)+(\irrep{21},\irrep{10})(-4)+(\irrepbar{35},\irrep{1})(20)+(\irrep{35},\irrep{5})(8)$\\
\irrep{572} & = & $(\irrep{7},\irrep{10})(-9)+(\irrep{7},\irrep{15})(-9)+(\irrep{21},\irrep{5})(3)+(\irrep{28},\irrep{5})(3)+(\irrep{1},\irrepbar{40})(-21)+(\irrep{112},\irrep{1})(15)$\\
\irrep{780} & = & $(\irrep{1},\irrep{5})(-7)+(\irrep{7},\irrep{1})(5)+(\irrepbar{7},\irrep{10})(-19)+(\irrep{21},\irrepbar{5})(17)+(\irrep{7},\irrep{24})(5)+(\irrep{1},\irrep{45})(-7)+(\irrep{48},\irrep{5})(-7)+(\irrep{140},\irrep{1})(5)$\\
\irrep{792} & = & $(\irrep{1},\irrep{1})(-35)+(\irrep{7},\irrepbar{5})(-23)+(\irrepbar{21},\irrep{1})(25)+(\irrep{21},\irrepbar{10})(-11)+(\irrepbar{35},\irrep{5})(13)+(\irrep{35},\irrep{10})(1)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SU(12)& $\to$ &SU(6)${\times}$SU(6)${\times}$U(1)\\
\midrule
\irrep{12} & = & $(\irrep{6},\irrep{1})(1)+(\irrep{1},\irrep{6})(-1)$\\
\irrep{66} & = & $(\irrep{6},\irrep{6})(0)+(\irrep{15},\irrep{1})(2)+(\irrep{1},\irrep{15})(-2)$\\
\irrep{78} & = & $(\irrep{6},\irrep{6})(0)+(\irrep{21},\irrep{1})(2)+(\irrep{1},\irrep{21})(-2)$\\
\irrep{143} & = & $(\irrep{1},\irrep{1})(0)+(\irrep{6},\irrepbar{6})(2)+(\irrepbar{6},\irrep{6})(-2)+(\irrep{35},\irrep{1})(0)+(\irrep{1},\irrep{35})(0)$\\
\irrep{220} & = & $(\irrep{6},\irrep{15})(-1)+(\irrep{15},\irrep{6})(1)+(\irrep{20},\irrep{1})(3)+(\irrep{1},\irrep{20})(-3)$\\
\irrep{364} & = & $(\irrep{21},\irrep{6})(1)+(\irrep{6},\irrep{21})(-1)+(\irrep{56},\irrep{1})(3)+(\irrep{1},\irrep{56})(-3)$\\
\irrep{495} & = & $(\irrepbar{15},\irrep{1})(4)+(\irrep{1},\irrepbar{15})(-4)+(\irrep{6},\irrep{20})(-2)+(\irrep{20},\irrep{6})(2)+(\irrep{15},\irrep{15})(0)$\\
\irrep{572} & = & $(\irrep{6},\irrep{15})(-1)+(\irrep{15},\irrep{6})(1)+(\irrep{21},\irrep{6})(1)+(\irrep{6},\irrep{21})(-1)+(\irrep{70},\irrep{1})(3)+(\irrep{1},\irrep{70})(-3)$\\
\irrep{780} & = & $(\irrep{6},\irrep{1})(1)+(\irrep{1},\irrep{6})(-1)+(\irrep{15},\irrepbar{6})(3)+(\irrepbar{6},\irrep{15})(-3)+(\irrep{35},\irrep{6})(-1)+(\irrep{6},\irrep{35})(1)+(\irrep{84},\irrep{1})(1)+(\irrep{1},\irrep{84})(-1)$\\
\irrep{792} & = & $(\irrepbar{6},\irrep{1})(5)+(\irrep{1},\irrepbar{6})(-5)+(\irrep{6},\irrepbar{15})(-3)+(\irrepbar{15},\irrep{6})(3)+(\irrep{15},\irrep{20})(-1)+(\irrep{20},\irrep{15})(1)$\\
\bottomrule
\end{longtable}
\newpage
\subsubsection{\SO{N}}
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SO7BranchingRules}SO(7) Branching Rules}\\
\endfirsthead
\caption[]{SO(7) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SO(7)& $\to$ &SU(4)\\
\midrule
\irrep{7} & = & $\irrep{1}+\irrep{6}$\\
\irrep{8} & = & $\irrep{4}+\irrepbar{4}$\\
\irrep{21} & = & $\irrep{6}+\irrep{15}$\\
\irrep{27} & = & $\irrep{1}+\irrep{6}+\irrep[1]{20}$\\
\irrep{35} & = & $\irrep{10}+\irrepbar{10}+\irrep{15}$\\
\irrep{48} & = & $\irrep{4}+\irrepbar{4}+\irrep{20}+\irrepbar{20}$\\
\irrep{77} & = & $\irrep{1}+\irrep{6}+\irrep[1]{20}+\irrep{50}$\\
\irrep{105} & = & $\irrep{6}+\irrep{15}+\irrep[1]{20}+\irrep{64}$\\
\irrep{112} & = & $\irrep{20}+\irrepbar{20}+\irrep{36}+\irrepbar{36}$\\
\irrep[1]{112} & = & $\irrep[2]{20}+\irrepbar[2]{20}+\irrep{36}+\irrepbar{36}$\\
\irrep{168} & = & $\irrep{4}+\irrepbar{4}+\irrep{20}+\irrepbar{20}+\irrep{60}+\irrepbar{60}$\\
\irrep[1]{168} & = & $\irrep[1]{20}+\irrep{64}+\irrep{84}$\\
\irrep{182} & = & $\irrep{1}+\irrep{6}+\irrep[1]{20}+\irrep{50}+\irrep{105}$\\
\irrep{189} & = & $\irrep{10}+\irrepbar{10}+\irrep{15}+\irrep{45}+\irrepbar{45}+\irrep{64}$\\
\irrep{294} & = & $\irrep{35}+\irrepbar{35}+\irrep{70}+\irrepbar{70}+\irrep{84}$\\
\irrep{330} & = & $\irrep{6}+\irrep{15}+\irrep[1]{20}+\irrep{50}+\irrep{64}+\irrep{175}$\\
\irrep{378} & = & $\irrep{45}+\irrepbar{45}+\irrep{64}+\irrep{70}+\irrepbar{70}+\irrep{84}$\\
\irrep[1]{378} & = & $\irrep{1}+\irrep{6}+\irrep[1]{20}+\irrep{50}+\irrep{105}+\irrep{196}$\\
\irrep{448} & = & $\irrep{4}+\irrepbar{4}+\irrep{20}+\irrepbar{20}+\irrep{60}+\irrepbar{60}+\irrep[1]{140}+\irrepbar[1]{140}$\\
\irrep{512} & = & $\irrep{20}+\irrepbar{20}+\irrep{36}+\irrepbar{36}+\irrep{60}+\irrepbar{60}+\irrep{140}+\irrepbar{140}$\\
\irrep{560} & = & $\irrep[2]{20}+\irrepbar[2]{20}+\irrep{36}+\irrepbar{36}+\irrep[1]{84}+\irrepbar[1]{84}+\irrep{140}+\irrepbar{140}$\\
\irrep{616} & = & $\irrep{10}+\irrepbar{10}+\irrep{15}+\irrep{45}+\irrepbar{45}+\irrep{64}+\irrep{126}+\irrepbar{126}+\irrep{175}$\\
\irrep{672} & = & $\irrep{56}+\irrepbar{56}+\irrep{120}+\irrepbar{120}+\irrep{160}+\irrepbar{160}$\\
\irrep{693} & = & $\irrep[1]{20}+\irrep{50}+\irrep{64}+\irrep{84}+\irrep{175}+\irrep{300}$\\
\irrep{714} & = & $\irrep{1}+\irrep{6}+\irrep[1]{20}+\irrep{50}+\irrep{105}+\irrep{196}+\irrep{336}$\\
\irrep{720} & = & $\irrep{60}+\irrepbar{60}+\irrep{140}+\irrepbar{140}+\irrep{160}+\irrepbar{160}$\\
\irrep{819} & = & $\irrep{6}+\irrep{15}+\irrep[1]{20}+\irrep{50}+\irrep{64}+\irrep{105}+\irrep{175}+\irrep{384}$\\
\irrep{825} & = & $\irrep{50}+\irrep{175}+\irrep{300}+\irrep[1]{300}$\\
\irrep{1008} & = & $\irrep[1]{84}+\irrepbar[1]{84}+\irrep{120}+\irrepbar{120}+\irrep{140}+\irrepbar{140}+\irrep{160}+\irrepbar{160}$\\
\irrep[1]{1008} & = & $\irrep{4}+\irrepbar{4}+\irrep{20}+\irrepbar{20}+\irrep{60}+\irrepbar{60}+\irrep[1]{140}+\irrepbar[1]{140}+\irrep[1]{280}+\irrepbar[1]{280}$\\
\irrep{1254} & = & $\irrep{1}+\irrep{6}+\irrep[1]{20}+\irrep{50}+\irrep{105}+\irrep{196}+\irrep{336}+\irrep[3]{540}$\\
\irrep{1386} & = & $\irrep{35}+\irrepbar{35}+\irrep{70}+\irrepbar{70}+\irrep{84}+\irrep[2]{140}+\irrepbar[2]{140}+\irrep{256}+\irrepbar{256}+\irrep{300}$\\
\irrep[1]{1386} & = & $\irrep[2]{84}+\irrepbar[2]{84}+\irrep{189}+\irrepbar{189}+\irrep{270}+\irrepbar{270}+\irrep[1]{300}$\\
\irrep{1512} & = & $\irrep{20}+\irrepbar{20}+\irrep{36}+\irrepbar{36}+\irrep{60}+\irrepbar{60}+\irrep{140}+\irrepbar{140}+\irrep[1]{140}+\irrepbar[1]{140}+\irrep{360}+\irrepbar{360}$\\
\irrep{1560} & = & $\irrep{10}+\irrepbar{10}+\irrep{15}+\irrep{45}+\irrepbar{45}+\irrep{64}+\irrep{126}+\irrepbar{126}+\irrep{175}+\irrep{280}+\irrepbar{280}+\irrep{384}$\\
\irrep{1617} & = & $\irrep{45}+\irrepbar{45}+\irrep{64}+\irrep{70}+\irrepbar{70}+\irrep{84}+\irrep{126}+\irrepbar{126}+\irrep{175}+\irrep{256}+\irrepbar{256}+\irrep{300}$\\
\irrep{1728} & = & $\irrep[2]{20}+\irrepbar[2]{20}+\irrep{36}+\irrepbar{36}+\irrep[1]{84}+\irrepbar[1]{84}+\irrep{140}+\irrepbar{140}+\irrep{224}+\irrepbar{224}+\irrep{360}+\irrepbar{360}$\\
\irrep{1750} & = & $\irrep{6}+\irrep{15}+\irrep[1]{20}+\irrep{50}+\irrep{64}+\irrep{105}+\irrep{175}+\irrep{196}+\irrep{384}+\irrep{735}$\\
\irrep{1911} & = & $\irrep[1]{20}+\irrep{50}+\irrep{64}+\irrep{84}+\irrep{105}+\irrep{175}+\irrep{300}+\irrep{384}+\irrep{729}$\\
\irrep{2016} & = & $\irrep{4}+\irrepbar{4}+\irrep{20}+\irrepbar{20}+\irrep{60}+\irrepbar{60}+\irrep[1]{140}+\irrepbar[1]{140}+\irrep[1]{280}+\irrepbar[1]{280}+\irrep{504}+\irrepbar{504}$\\
\irrep{2079} & = & $\irrep{126}+\irrepbar{126}+\irrep{175}+\irrep{256}+\irrepbar{256}+\irrep{270}+\irrepbar{270}+\irrep{300}+\irrep[1]{300}$\\
\irrep[1]{2079} & = & $\irrep{1}+\irrep{6}+\irrep[1]{20}+\irrep{50}+\irrep{105}+\irrep{196}+\irrep{336}+\irrep[3]{540}+\irrep[1]{825}$\\
\irrep{2310} & = & $\irrep[2]{140}+\irrepbar[2]{140}+\irrep{189}+\irrepbar{189}+\irrep{256}+\irrepbar{256}+\irrep{270}+\irrepbar{270}+\irrep{300}+\irrep[1]{300}$\\
\bottomrule
\end{longtable}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SO8BranchingRules}SO(8) Branching Rules}\\
\endfirsthead
\caption[]{SO(8) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SO(8)& $\to$ &SO(7)\\
\midrule
\irrepsub{8}{s} & = & $\irrep{1}+\irrep{7}$\\
\irrepsub{8}{v} & = & $\irrep{8}$\\
\irrepsub{8}{c} & = & $\irrep{8}$\\
\irrep{28} & = & $\irrep{7}+\irrep{21}$\\
\irrepsub{35}{v} & = & $\irrep{35}$\\
\irrepsub{35}{c} & = & $\irrep{35}$\\
\irrepsub{35}{s} & = & $\irrep{1}+\irrep{7}+\irrep{27}$\\
\irrepsub{56}{s} & = & $\irrep{21}+\irrep{35}$\\
\irrepsub{56}{v} & = & $\irrep{8}+\irrep{48}$\\
\irrepsub{56}{c} & = & $\irrep{8}+\irrep{48}$\\
\irrepsub{112}{s} & = & $\irrep{1}+\irrep{7}+\irrep{27}+\irrep{77}$\\
\irrepsub{112}{v} & = & $\irrep[1]{112}$\\
\irrepsub{112}{c} & = & $\irrep[1]{112}$\\
\irrepsub{160}{s} & = & $\irrep{7}+\irrep{21}+\irrep{27}+\irrep{105}$\\
\irrepsub{160}{v} & = & $\irrep{48}+\irrep{112}$\\
\irrepsub{160}{c} & = & $\irrep{48}+\irrep{112}$\\
\irrepsub{224}{vs} & = & $\irrep{35}+\irrep{189}$\\
\irrepsub{224}{cs} & = & $\irrep{35}+\irrep{189}$\\
\irrepsub{224}{cv} & = & $\irrep{112}+\irrep[1]{112}$\\
\irrepsub{224}{sv} & = & $\irrep{8}+\irrep{48}+\irrep{168}$\\
\irrepsub{224}{vc} & = & $\irrep{112}+\irrep[1]{112}$\\
\irrepsub{224}{sc} & = & $\irrep{8}+\irrep{48}+\irrep{168}$\\
\irrepsub{294}{v} & = & $\irrep{294}$\\
\irrepsub{294}{c} & = & $\irrep{294}$\\
\irrepsub{294}{s} & = & $\irrep{1}+\irrep{7}+\irrep{27}+\irrep{77}+\irrep{182}$\\
\irrep{300} & = & $\irrep{27}+\irrep{105}+\irrep[1]{168}$\\
\irrep{350} & = & $\irrep{21}+\irrep{35}+\irrep{105}+\irrep{189}$\\
\irrepsub{567}{v} & = & $\irrep{189}+\irrep{378}$\\
\irrepsub{567}{c} & = & $\irrep{189}+\irrep{378}$\\
\irrepsub{567}{s} & = & $\irrep{7}+\irrep{21}+\irrep{27}+\irrep{77}+\irrep{105}+\irrep{330}$\\
\irrepsub{672}{vc} & = & $\irrep{294}+\irrep{378}$\\
\irrepsub{672}{cv} & = & $\irrep{294}+\irrep{378}$\\
\irrepsub{672}{cs} & = & $\irrep[1]{112}+\irrep{560}$\\
\irrepsub{672}{sc} & = & $\irrep{8}+\irrep{48}+\irrep{168}+\irrep{448}$\\
\irrepsub{672}{vs} & = & $\irrep[1]{112}+\irrep{560}$\\
\irrepsub{672}{sv} & = & $\irrep{8}+\irrep{48}+\irrep{168}+\irrep{448}$\\
\irrepsub[1]{672}{s} & = & $\irrep{1}+\irrep{7}+\irrep{27}+\irrep{77}+\irrep{182}+\irrep[1]{378}$\\
\irrepsub[1]{672}{v} & = & $\irrep{672}$\\
\irrepsub[1]{672}{c} & = & $\irrep{672}$\\
\irrepsub{840}{s} & = & $\irrep{105}+\irrep[1]{168}+\irrep{189}+\irrep{378}$\\
\irrepsub{840}{v} & = & $\irrep{48}+\irrep{112}+\irrep{168}+\irrep{512}$\\
\irrepsub{840}{c} & = & $\irrep{48}+\irrep{112}+\irrep{168}+\irrep{512}$\\
\irrepsub[1]{840}{s} & = & $\irrep[1]{168}+\irrep{294}+\irrep{378}$\\
\irrepsub[1]{840}{c} & = & $\irrep{35}+\irrep{189}+\irrep{616}$\\
\irrepsub[1]{840}{v} & = & $\irrep{35}+\irrep{189}+\irrep{616}$\\
\irrepsub{1296}{s} & = & $\irrep{21}+\irrep{35}+\irrep{105}+\irrep{189}+\irrep{330}+\irrep{616}$\\
\irrepsub{1296}{v} & = & $\irrep{112}+\irrep[1]{112}+\irrep{512}+\irrep{560}$\\
\irrepsub{1296}{c} & = & $\irrep{112}+\irrep[1]{112}+\irrep{512}+\irrep{560}$\\
\irrepsub{1386}{v} & = & $\irrep[1]{1386}$\\
\irrepsub{1386}{c} & = & $\irrep[1]{1386}$\\
\irrepsub{1386}{s} & = & $\irrep{1}+\irrep{7}+\irrep{27}+\irrep{77}+\irrep{182}+\irrep[1]{378}+\irrep{714}$\\
\irrepsub{1400}{s} & = & $\irrep{27}+\irrep{77}+\irrep{105}+\irrep[1]{168}+\irrep{330}+\irrep{693}$\\
\irrepsub{1400}{v} & = & $\irrep{168}+\irrep{512}+\irrep{720}$\\
\irrepsub{1400}{c} & = & $\irrep{168}+\irrep{512}+\irrep{720}$\\
\irrepsub{1568}{s} & = & $\irrep{7}+\irrep{21}+\irrep{27}+\irrep{77}+\irrep{105}+\irrep{182}+\irrep{330}+\irrep{819}$\\
\irrepsub{1568}{v} & = & $\irrep{560}+\irrep{1008}$\\
\irrepsub{1568}{c} & = & $\irrep{560}+\irrep{1008}$\\
\irrepsub{1680}{vs} & = & $\irrep{294}+\irrep{1386}$\\
\irrepsub{1680}{cs} & = & $\irrep{294}+\irrep{1386}$\\
\irrepsub{1680}{cv} & = & $\irrep{672}+\irrep{1008}$\\
\irrepsub{1680}{sv} & = & $\irrep{8}+\irrep{48}+\irrep{168}+\irrep{448}+\irrep[1]{1008}$\\
\irrepsub{1680}{vc} & = & $\irrep{672}+\irrep{1008}$\\
\irrepsub{1680}{sc} & = & $\irrep{8}+\irrep{48}+\irrep{168}+\irrep{448}+\irrep[1]{1008}$\\
\irrep{1925} & = & $\irrep{77}+\irrep{330}+\irrep{693}+\irrep{825}$\\
\irrepsub{2400}{sv} & = & $\irrep{35}+\irrep{189}+\irrep{616}+\irrep{1560}$\\
\irrepsub{2400}{sc} & = & $\irrep{35}+\irrep{189}+\irrep{616}+\irrep{1560}$\\
\irrepsub{2400}{vc} & = & $\irrep{672}+\irrep{720}+\irrep{1008}$\\
\irrepsub{2400}{vs} & = & $\irrep[1]{112}+\irrep{560}+\irrep{1728}$\\
\irrepsub{2400}{cv} & = & $\irrep{672}+\irrep{720}+\irrep{1008}$\\
\irrepsub{2400}{cs} & = & $\irrep[1]{112}+\irrep{560}+\irrep{1728}$\\
\irrepsub{2640}{s} & = & $\irrep{1}+\irrep{7}+\irrep{27}+\irrep{77}+\irrep{182}+\irrep[1]{378}+\irrep{714}+\irrep{1254}$\\
\irrepsub{2640}{v} & = & $\irrep{2640}$\\
\irrepsub{2640}{c} & = & $\irrep{2640}$\\
\irrepsub{2800}{vs} & = & $\irrep{189}+\irrep{378}+\irrep{616}+\irrep{1617}$\\
\irrepsub{2800}{cs} & = & $\irrep{189}+\irrep{378}+\irrep{616}+\irrep{1617}$\\
\irrepsub{2800}{cv} & = & $\irrep{512}+\irrep{560}+\irrep{720}+\irrep{1008}$\\
\irrepsub{2800}{sv} & = & $\irrep{48}+\irrep{112}+\irrep{168}+\irrep{448}+\irrep{512}+\irrep{1512}$\\
\irrepsub{2800}{vc} & = & $\irrep{512}+\irrep{560}+\irrep{720}+\irrep{1008}$\\
\irrepsub{2800}{sc} & = & $\irrep{48}+\irrep{112}+\irrep{168}+\irrep{448}+\irrep{512}+\irrep{1512}$\\
\irrepsub{3675}{v} & = & $\irrep{294}+\irrep{378}+\irrep{1386}+\irrep{1617}$\\
\irrepsub{3675}{c} & = & $\irrep{294}+\irrep{378}+\irrep{1386}+\irrep{1617}$\\
\irrepsub{3675}{s} & = & $\irrep{21}+\irrep{35}+\irrep{105}+\irrep{189}+\irrep{330}+\irrep{616}+\irrep{819}+\irrep{1560}$\\
\irrepsub{3696}{v} & = & $\irrep{1386}+\irrep{2310}$\\
\irrepsub{3696}{c} & = & $\irrep{1386}+\irrep{2310}$\\
\irrepsub{3696}{s} & = & $\irrep{7}+\irrep{21}+\irrep{27}+\irrep{77}+\irrep{105}+\irrep{182}+\irrep{330}+\irrep[1]{378}+\irrep{819}+\irrep{1750}$\\
\irrepsub[1]{3696}{vc} & = & $\irrep[1]{1386}+\irrep{2310}$\\
\irrepsub[1]{3696}{cv} & = & $\irrep[1]{1386}+\irrep{2310}$\\
\irrepsub[1]{3696}{cs} & = & $\irrep{672}+\irrep{3024}$\\
\irrepsub[1]{3696}{sc} & = & $\irrep{8}+\irrep{48}+\irrep{168}+\irrep{448}+\irrep[1]{1008}+\irrep{2016}$\\
\irrepsub[1]{3696}{vs} & = & $\irrep{672}+\irrep{3024}$\\
\irrepsub[1]{3696}{sv} & = & $\irrep{8}+\irrep{48}+\irrep{168}+\irrep{448}+\irrep[1]{1008}+\irrep{2016}$\\
\bottomrule
\end{longtable}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SO9BranchingRules}SO(9) Branching Rules}\\
\endfirsthead
\caption[]{SO(9) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SO(9)& $\to$ &SO(8)\\
\midrule
\irrep{9} & = & $\irrep{1}+\irrepsub{8}{v}$\\
\irrep{16} & = & $\irrepsub{8}{s}+\irrepsub{8}{c}$\\
\irrep{36} & = & $\irrepsub{8}{v}+\irrep{28}$\\
\irrep{44} & = & $\irrep{1}+\irrepsub{8}{v}+\irrepsub{35}{v}$\\
\irrep{84} & = & $\irrep{28}+\irrepsub{56}{v}$\\
\irrep{126} & = & $\irrepsub{35}{c}+\irrepsub{35}{s}+\irrepsub{56}{v}$\\
\irrep{128} & = & $\irrepsub{8}{s}+\irrepsub{8}{c}+\irrepsub{56}{c}+\irrepsub{56}{s}$\\
\irrep{156} & = & $\irrep{1}+\irrepsub{8}{v}+\irrepsub{35}{v}+\irrepsub{112}{v}$\\
\irrep{231} & = & $\irrepsub{8}{v}+\irrep{28}+\irrepsub{35}{v}+\irrepsub{160}{v}$\\
\irrep{432} & = & $\irrepsub{56}{c}+\irrepsub{56}{s}+\irrepsub{160}{s}+\irrepsub{160}{c}$\\
\irrep{450} & = & $\irrep{1}+\irrepsub{8}{v}+\irrepsub{35}{v}+\irrepsub{112}{v}+\irrepsub{294}{v}$\\
\irrep{495} & = & $\irrepsub{35}{v}+\irrepsub{160}{v}+\irrep{300}$\\
\irrep{576} & = & $\irrepsub{8}{s}+\irrepsub{8}{c}+\irrepsub{56}{c}+\irrepsub{56}{s}+\irrepsub{224}{vs}+\irrepsub{224}{vc}$\\
\irrep{594} & = & $\irrep{28}+\irrepsub{56}{v}+\irrepsub{160}{v}+\irrep{350}$\\
\irrep{672} & = & $\irrepsub{112}{s}+\irrepsub{112}{c}+\irrepsub{224}{sc}+\irrepsub{224}{cs}$\\
\irrep{768} & = & $\irrepsub{160}{s}+\irrepsub{160}{c}+\irrepsub{224}{sc}+\irrepsub{224}{cs}$\\
\irrep{910} & = & $\irrepsub{8}{v}+\irrep{28}+\irrepsub{35}{v}+\irrepsub{112}{v}+\irrepsub{160}{v}+\irrepsub{567}{v}$\\
\irrep{924} & = & $\irrepsub{35}{c}+\irrepsub{35}{s}+\irrepsub{56}{v}+\irrepsub{224}{cv}+\irrepsub{224}{sv}+\irrep{350}$\\
\irrep{1122} & = & $\irrep{1}+\irrepsub{8}{v}+\irrepsub{35}{v}+\irrepsub{112}{v}+\irrepsub{294}{v}+\irrepsub[1]{672}{v}$\\
\irrep{1650} & = & $\irrepsub{160}{v}+\irrep{300}+\irrep{350}+\irrepsub{840}{v}$\\
\irrep{1920} & = & $\irrepsub{8}{s}+\irrepsub{8}{c}+\irrepsub{56}{c}+\irrepsub{56}{s}+\irrepsub{224}{vs}+\irrepsub{224}{vc}+\irrepsub{672}{vs}+\irrepsub{672}{vc}$\\
\irrep{1980} & = & $\irrep{300}+\irrepsub{840}{v}+\irrepsub[1]{840}{v}$\\
\irrep{2457} & = & $\irrep{28}+\irrepsub{56}{v}+\irrepsub{160}{v}+\irrep{350}+\irrepsub{567}{v}+\irrepsub{1296}{v}$\\
\irrep{2508} & = & $\irrep{1}+\irrepsub{8}{v}+\irrepsub{35}{v}+\irrepsub{112}{v}+\irrepsub{294}{v}+\irrepsub[1]{672}{v}+\irrepsub{1386}{v}$\\
\irrep{2560} & = & $\irrepsub{56}{c}+\irrepsub{56}{s}+\irrepsub{160}{s}+\irrepsub{160}{c}+\irrepsub{224}{vs}+\irrepsub{224}{vc}+\irrepsub{840}{c}+\irrepsub{840}{s}$\\
\irrep{2574} & = & $\irrepsub{35}{v}+\irrepsub{112}{v}+\irrepsub{160}{v}+\irrep{300}+\irrepsub{567}{v}+\irrepsub{1400}{v}$\\
\irrep{2772} & = & $\irrepsub{224}{cv}+\irrepsub{224}{sv}+\irrep{350}+\irrepsub{567}{c}+\irrepsub{567}{s}+\irrepsub{840}{v}$\\
\irrep[1]{2772} & = & $\irrepsub{294}{c}+\irrepsub{294}{s}+\irrepsub{672}{cs}+\irrepsub{672}{sc}+\irrepsub[1]{840}{v}$\\
\irrep[2]{2772} & = & $\irrepsub{8}{v}+\irrep{28}+\irrepsub{35}{v}+\irrepsub{112}{v}+\irrepsub{160}{v}+\irrepsub{294}{v}+\irrepsub{567}{v}+\irrepsub{1568}{v}$\\
\irrep{3900} & = & $\irrepsub{35}{c}+\irrepsub{35}{s}+\irrepsub{56}{v}+\irrepsub{224}{cv}+\irrepsub{224}{sv}+\irrep{350}+\irrepsub[1]{840}{s}+\irrepsub[1]{840}{c}+\irrepsub{1296}{v}$\\
\irrep{4004} & = & $\irrepsub{112}{v}+\irrepsub{567}{v}+\irrepsub{1400}{v}+\irrep{1925}$\\
\irrep{4158} & = & $\irrepsub{567}{c}+\irrepsub{567}{s}+\irrepsub{672}{cs}+\irrepsub{672}{sc}+\irrepsub{840}{v}+\irrepsub[1]{840}{v}$\\
\irrep{4608} & = & $\irrepsub{112}{s}+\irrepsub{112}{c}+\irrepsub{224}{sc}+\irrepsub{224}{cs}+\irrepsub{672}{sv}+\irrepsub{672}{cv}+\irrepsub{1296}{s}+\irrepsub{1296}{c}$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SO(9)& $\to$ &SU(4)${\times}$SU(2)\\
\midrule
\irrep{9} & = & $(\irrep{1},\irrep{3})+(\irrep{6},\irrep{1})$\\
\irrep{16} & = & $(\irrep{4},\irrep{2})+(\irrepbar{4},\irrep{2})$\\
\irrep{36} & = & $(\irrep{1},\irrep{3})+(\irrep{6},\irrep{3})+(\irrep{15},\irrep{1})$\\
\irrep{44} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{5})+(\irrep{6},\irrep{3})+(\irrep[1]{20},\irrep{1})$\\
\irrep{84} & = & $(\irrep{1},\irrep{1})+(\irrep{6},\irrep{3})+(\irrep{10},\irrep{1})+(\irrepbar{10},\irrep{1})+(\irrep{15},\irrep{3})$\\
\irrep{126} & = & $(\irrep{6},\irrep{1})+(\irrep{10},\irrep{3})+(\irrepbar{10},\irrep{3})+(\irrep{15},\irrep{1})+(\irrep{15},\irrep{3})$\\
\irrep{128} & = & $(\irrep{4},\irrep{2})+(\irrepbar{4},\irrep{2})+(\irrep{4},\irrep{4})+(\irrepbar{4},\irrep{4})+(\irrep{20},\irrep{2})+(\irrepbar{20},\irrep{2})$\\
\irrep{156} & = & $(\irrep{1},\irrep{3})+(\irrep{6},\irrep{1})+(\irrep{1},\irrep{7})+(\irrep{6},\irrep{5})+(\irrep[1]{20},\irrep{3})+(\irrep{50},\irrep{1})$\\
\irrep{231} & = & $(\irrep{1},\irrep{3})+(\irrep{1},\irrep{5})+(\irrep{6},\irrep{1})+(\irrep{6},\irrep{3})+(\irrep{6},\irrep{5})+(\irrep{15},\irrep{3})+(\irrep[1]{20},\irrep{3})+(\irrep{64},\irrep{1})$\\
\irrep{432} & = & $(\irrep{4},\irrep{2})+(\irrepbar{4},\irrep{2})+(\irrep{4},\irrep{4})+(\irrepbar{4},\irrep{4})+(\irrep{20},\irrep{2})+(\irrepbar{20},\irrep{2})+(\irrep{20},\irrep{4})+(\irrepbar{20},\irrep{4})+(\irrep{36},\irrep{2})+(\irrepbar{36},\irrep{2})$\\
\irrep{450} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{5})+(\irrep{6},\irrep{3})+(\irrep{1},\irrep{9})+(\irrep{6},\irrep{7})+(\irrep[1]{20},\irrep{1})+(\irrep[1]{20},\irrep{5})+(\irrep{50},\irrep{3})+(\irrep{105},\irrep{1})$\\
\irrep{495} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{5})+(\irrep{6},\irrep{3})+(\irrep{6},\irrep{5})+(\irrep{15},\irrep{3})+(\irrep[1]{20},\irrep{1})+(\irrep[1]{20},\irrep{5})+(\irrep{64},\irrep{3})+(\irrep{84},\irrep{1})$\\
\irrep{576} & = & $(\irrep{4},\irrep{2})+(\irrepbar{4},\irrep{2})+(\irrep{4},\irrep{4})+(\irrepbar{4},\irrep{4})+(\irrep{4},\irrep{6})+(\irrepbar{4},\irrep{6})+(\irrep{20},\irrep{2})+(\irrepbar{20},\irrep{2})+(\irrep{20},\irrep{4})+(\irrepbar{20},\irrep{4})+(\irrep{60},\irrep{2})+(\irrepbar{60},\irrep{2})$\\
\irrep{594} & = & $(\irrep{1},\irrep{3})+(\irrep{6},\irrep{1})+(\irrep{6},\irrep{3})+(\irrep{6},\irrep{5})+(\irrep{10},\irrep{3})+(\irrepbar{10},\irrep{3})+(\irrep{15},\irrep{1})+(\irrep{15},\irrep{3})+(\irrep{15},\irrep{5})+(\irrep[1]{20},\irrep{3})+(\irrep{45},\irrep{1})+(\irrepbar{45},\irrep{1})+(\irrep{64},\irrep{3})$\\
\irrep{672} & = & $(\irrep{20},\irrep{2})+(\irrepbar{20},\irrep{2})+(\irrep[2]{20},\irrep{4})+(\irrepbar[2]{20},\irrep{4})+(\irrep{36},\irrep{2})+(\irrepbar{36},\irrep{2})+(\irrep{36},\irrep{4})+(\irrepbar{36},\irrep{4})$\\
\irrep{768} & = & $(\irrep{4},\irrep{2})+(\irrepbar{4},\irrep{2})+(\irrep{20},\irrep{2})+(\irrepbar{20},\irrep{2})+(\irrep[2]{20},\irrep{2})+(\irrepbar[2]{20},\irrep{2})+(\irrep{20},\irrep{4})+(\irrepbar{20},\irrep{4})+(\irrep{36},\irrep{2})+(\irrepbar{36},\irrep{2})+(\irrep{36},\irrep{4})+(\irrepbar{36},\irrep{4})$\\
\irrep{910} & = & $(\irrep{1},\irrep{3})+(\irrep{1},\irrep{5})+(\irrep{1},\irrep{7})+2(\irrep{6},\irrep{3})+(\irrep{6},\irrep{5})+(\irrep{6},\irrep{7})+(\irrep{15},\irrep{1})+(\irrep{15},\irrep{5})+(\irrep[1]{20},\irrep{1})+(\irrep[1]{20},\irrep{3})+(\irrep[1]{20},\irrep{5})+(\irrep{50},\irrep{3})+(\irrep{64},\irrep{3})+(\irrep{175},\irrep{1})$\\
\irrep{924} & = & $(\irrep{6},\irrep{3})+(\irrep{10},\irrep{1})+(\irrepbar{10},\irrep{1})+(\irrep{10},\irrep{3})+(\irrepbar{10},\irrep{3})+(\irrep{10},\irrep{5})+(\irrepbar{10},\irrep{5})+(\irrep{15},\irrep{1})+2(\irrep{15},\irrep{3})+(\irrep{15},\irrep{5})+(\irrep[1]{20},\irrep{1})+(\irrep{45},\irrep{3})+(\irrepbar{45},\irrep{3})+(\irrep{64},\irrep{1})+(\irrep{64},\irrep{3})$\\
\irrep{1122} & = & $(\irrep{1},\irrep{3})+(\irrep{6},\irrep{1})+(\irrep{1},\irrep{7})+(\irrep{6},\irrep{5})+(\irrep{1},\irrep{11})+(\irrep{6},\irrep{9})+(\irrep[1]{20},\irrep{3})+(\irrep[1]{20},\irrep{7})+(\irrep{50},\irrep{1})+(\irrep{50},\irrep{5})+(\irrep{105},\irrep{3})+(\irrep{196},\irrep{1})$\\
\irrep{1650} & = & $(\irrep{1},\irrep{3})+(\irrep{6},\irrep{1})+(\irrep{6},\irrep{3})+(\irrep{6},\irrep{5})+(\irrep{10},\irrep{3})+(\irrepbar{10},\irrep{3})+(\irrep{15},\irrep{1})+(\irrep{15},\irrep{3})+(\irrep{15},\irrep{5})+(\irrep[1]{20},\irrep{3})+(\irrep[1]{20},\irrep{5})+(\irrep{45},\irrep{3})+(\irrepbar{45},\irrep{3})+(\irrep{64},\irrep{1})+(\irrep{64},\irrep{3})+(\irrep{64},\irrep{5})+(\irrep{70},\irrep{1})+(\irrepbar{70},\irrep{1})+(\irrep{84},\irrep{3})$\\
\irrep{1920} & = & $(\irrep{4},\irrep{2})+(\irrepbar{4},\irrep{2})+(\irrep{4},\irrep{4})+(\irrepbar{4},\irrep{4})+(\irrep{4},\irrep{6})+(\irrepbar{4},\irrep{6})+(\irrep{4},\irrep{8})+(\irrepbar{4},\irrep{8})+(\irrep{20},\irrep{2})+(\irrepbar{20},\irrep{2})+(\irrep{20},\irrep{4})+(\irrepbar{20},\irrep{4})+(\irrep{20},\irrep{6})+(\irrepbar{20},\irrep{6})+(\irrep{60},\irrep{2})+(\irrepbar{60},\irrep{2})+(\irrep{60},\irrep{4})+(\irrepbar{60},\irrep{4})+(\irrep[1]{140},\irrep{2})+(\irrepbar[1]{140},\irrep{2})$\\
\irrep{1980} & = & $(\irrep{1},\irrep{1})+(\irrep{6},\irrep{3})+(\irrep{10},\irrep{1})+(\irrepbar{10},\irrep{1})+(\irrep{15},\irrep{3})+(\irrep[1]{20},\irrep{1})+(\irrep[1]{20},\irrep{5})+(\irrep{35},\irrep{1})+(\irrepbar{35},\irrep{1})+(\irrep{45},\irrep{3})+(\irrepbar{45},\irrep{3})+(\irrep{64},\irrep{3})+(\irrep{64},\irrep{5})+(\irrep{70},\irrep{3})+(\irrepbar{70},\irrep{3})+(\irrep{84},\irrep{1})+(\irrep{84},\irrep{5})$\\
\irrep{2457} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{5})+2(\irrep{6},\irrep{3})+(\irrep{10},\irrep{1})+(\irrepbar{10},\irrep{1})+(\irrep{6},\irrep{5})+(\irrep{6},\irrep{7})+(\irrep{10},\irrep{5})+(\irrepbar{10},\irrep{5})+2(\irrep{15},\irrep{3})+(\irrep{15},\irrep{5})+(\irrep[1]{20},\irrep{1})+(\irrep{15},\irrep{7})+(\irrep[1]{20},\irrep{3})+(\irrep[1]{20},\irrep{5})+(\irrep{45},\irrep{3})+(\irrepbar{45},\irrep{3})+(\irrep{50},\irrep{3})+(\irrep{64},\irrep{1})+(\irrep{64},\irrep{3})+(\irrep{64},\irrep{5})+(\irrep{126},\irrep{1})+(\irrepbar{126},\irrep{1})+(\irrep{175},\irrep{3})$\\
\irrep{2508} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{5})+(\irrep{6},\irrep{3})+(\irrep{1},\irrep{9})+(\irrep{6},\irrep{7})+(\irrep{1},\irrep{13})+(\irrep{6},\irrep{11})+(\irrep[1]{20},\irrep{1})+(\irrep[1]{20},\irrep{5})+(\irrep[1]{20},\irrep{9})+(\irrep{50},\irrep{3})+(\irrep{50},\irrep{7})+(\irrep{105},\irrep{1})+(\irrep{105},\irrep{5})+(\irrep{196},\irrep{3})+(\irrep{336},\irrep{1})$\\
\irrep{2560} & = & $(\irrep{4},\irrep{2})+(\irrepbar{4},\irrep{2})+2(\irrep{4},\irrep{4})+2(\irrepbar{4},\irrep{4})+(\irrep{4},\irrep{6})+(\irrepbar{4},\irrep{6})+2(\irrep{20},\irrep{2})+2(\irrepbar{20},\irrep{2})+2(\irrep{20},\irrep{4})+2(\irrepbar{20},\irrep{4})+(\irrep{20},\irrep{6})+(\irrepbar{20},\irrep{6})+(\irrep{36},\irrep{2})+(\irrepbar{36},\irrep{2})+(\irrep{36},\irrep{4})+(\irrepbar{36},\irrep{4})+(\irrep{60},\irrep{2})+(\irrepbar{60},\irrep{2})+(\irrep{60},\irrep{4})+(\irrepbar{60},\irrep{4})+(\irrep{140},\irrep{2})+(\irrepbar{140},\irrep{2})$\\
\irrep{2574} & = & $(\irrep{1},\irrep{3})+(\irrep{1},\irrep{5})+(\irrep{6},\irrep{1})+(\irrep{1},\irrep{7})+(\irrep{6},\irrep{3})+2(\irrep{6},\irrep{5})+(\irrep{6},\irrep{7})+(\irrep{15},\irrep{3})+(\irrep{15},\irrep{5})+2(\irrep[1]{20},\irrep{3})+(\irrep[1]{20},\irrep{5})+(\irrep[1]{20},\irrep{7})+(\irrep{50},\irrep{1})+(\irrep{50},\irrep{5})+(\irrep{64},\irrep{1})+(\irrep{64},\irrep{3})+(\irrep{64},\irrep{5})+(\irrep{84},\irrep{3})+(\irrep{175},\irrep{3})+(\irrep{300},\irrep{1})$\\
\irrep{2772} & = & $(\irrep{6},\irrep{3})+(\irrep{10},\irrep{1})+(\irrepbar{10},\irrep{1})+(\irrep{10},\irrep{3})+(\irrepbar{10},\irrep{3})+(\irrep{10},\irrep{5})+(\irrepbar{10},\irrep{5})+(\irrep{15},\irrep{1})+2(\irrep{15},\irrep{3})+(\irrep{15},\irrep{5})+(\irrep[1]{20},\irrep{3})+(\irrep{45},\irrep{1})+(\irrepbar{45},\irrep{1})+(\irrep{45},\irrep{3})+(\irrepbar{45},\irrep{3})+(\irrep{45},\irrep{5})+(\irrepbar{45},\irrep{5})+(\irrep{64},\irrep{1})+2(\irrep{64},\irrep{3})+(\irrep{64},\irrep{5})+(\irrep{70},\irrep{3})+(\irrepbar{70},\irrep{3})+(\irrep{84},\irrep{1})+(\irrep{84},\irrep{3})$\\
\irrep[1]{2772} & = & $(\irrep[1]{20},\irrep{1})+(\irrep{35},\irrep{5})+(\irrepbar{35},\irrep{5})+(\irrep{45},\irrep{3})+(\irrepbar{45},\irrep{3})+(\irrep{64},\irrep{1})+(\irrep{64},\irrep{3})+(\irrep{70},\irrep{3})+(\irrepbar{70},\irrep{3})+(\irrep{70},\irrep{5})+(\irrepbar{70},\irrep{5})+(\irrep{84},\irrep{1})+(\irrep{84},\irrep{3})+(\irrep{84},\irrep{5})$\\
\irrep[2]{2772} & = & $(\irrep{1},\irrep{3})+(\irrep{1},\irrep{5})+(\irrep{6},\irrep{1})+(\irrep{1},\irrep{7})+(\irrep{6},\irrep{3})+(\irrep{1},\irrep{9})+2(\irrep{6},\irrep{5})+(\irrep{6},\irrep{7})+(\irrep{6},\irrep{9})+(\irrep{15},\irrep{3})+(\irrep{15},\irrep{7})+2(\irrep[1]{20},\irrep{3})+(\irrep[1]{20},\irrep{5})+(\irrep[1]{20},\irrep{7})+(\irrep{50},\irrep{1})+(\irrep{50},\irrep{3})+(\irrep{50},\irrep{5})+(\irrep{64},\irrep{1})+(\irrep{64},\irrep{5})+(\irrep{105},\irrep{3})+(\irrep{175},\irrep{3})+(\irrep{384},\irrep{1})$\\
\irrep{3900} & = & $(\irrep{6},\irrep{1})+(\irrep{6},\irrep{5})+2(\irrep{10},\irrep{3})+2(\irrepbar{10},\irrep{3})+(\irrep{10},\irrep{5})+(\irrepbar{10},\irrep{5})+(\irrep{15},\irrep{1})+(\irrep{10},\irrep{7})+(\irrepbar{10},\irrep{7})+2(\irrep{15},\irrep{3})+2(\irrep{15},\irrep{5})+(\irrep{15},\irrep{7})+(\irrep[1]{20},\irrep{3})+(\irrep{45},\irrep{1})+(\irrepbar{45},\irrep{1})+(\irrep{45},\irrep{3})+(\irrepbar{45},\irrep{3})+(\irrep{45},\irrep{5})+(\irrepbar{45},\irrep{5})+(\irrep{50},\irrep{1})+(\irrep{64},\irrep{1})+2(\irrep{64},\irrep{3})+(\irrep{64},\irrep{5})+(\irrep{126},\irrep{3})+(\irrepbar{126},\irrep{3})+(\irrep{175},\irrep{1})+(\irrep{175},\irrep{3})$\\
\irrep{4004} & = & $(\irrep{1},\irrep{3})+(\irrep{1},\irrep{7})+(\irrep{6},\irrep{3})+(\irrep{6},\irrep{5})+(\irrep{6},\irrep{7})+(\irrep{15},\irrep{1})+(\irrep{15},\irrep{5})+(\irrep[1]{20},\irrep{3})+(\irrep[1]{20},\irrep{5})+(\irrep[1]{20},\irrep{7})+(\irrep{50},\irrep{3})+(\irrep{50},\irrep{7})+(\irrep{64},\irrep{3})+(\irrep{64},\irrep{5})+(\irrep{84},\irrep{3})+(\irrep{175},\irrep{1})+(\irrep{175},\irrep{5})+(\irrep[1]{300},\irrep{1})+(\irrep{300},\irrep{3})$\\
\irrep{4158} & = & $(\irrep{6},\irrep{1})+(\irrep{10},\irrep{3})+(\irrepbar{10},\irrep{3})+(\irrep{15},\irrep{1})+(\irrep{15},\irrep{3})+(\irrep[1]{20},\irrep{3})+(\irrep{35},\irrep{3})+(\irrepbar{35},\irrep{3})+(\irrep{45},\irrep{1})+(\irrepbar{45},\irrep{1})+(\irrep{45},\irrep{3})+(\irrepbar{45},\irrep{3})+(\irrep{45},\irrep{5})+(\irrepbar{45},\irrep{5})+(\irrep{64},\irrep{1})+2(\irrep{64},\irrep{3})+(\irrep{64},\irrep{5})+(\irrep{70},\irrep{1})+(\irrepbar{70},\irrep{1})+(\irrep{70},\irrep{3})+(\irrepbar{70},\irrep{3})+(\irrep{70},\irrep{5})+(\irrepbar{70},\irrep{5})+2(\irrep{84},\irrep{3})+(\irrep{84},\irrep{5})$\\
\irrep{4608} & = & $(\irrep{20},\irrep{2})+(\irrepbar{20},\irrep{2})+(\irrep[2]{20},\irrep{2})+(\irrepbar[2]{20},\irrep{2})+(\irrep{20},\irrep{4})+(\irrepbar{20},\irrep{4})+(\irrep[2]{20},\irrep{4})+(\irrepbar[2]{20},\irrep{4})+(\irrep[2]{20},\irrep{6})+(\irrepbar[2]{20},\irrep{6})+2(\irrep{36},\irrep{2})+2(\irrepbar{36},\irrep{2})+2(\irrep{36},\irrep{4})+2(\irrepbar{36},\irrep{4})+(\irrep{36},\irrep{6})+(\irrepbar{36},\irrep{6})+(\irrep{60},\irrep{2})+(\irrepbar{60},\irrep{2})+(\irrep[1]{84},\irrep{4})+(\irrepbar[1]{84},\irrep{4})+(\irrep{140},\irrep{2})+(\irrepbar{140},\irrep{2})+(\irrep{140},\irrep{4})+(\irrepbar{140},\irrep{4})$\\
\bottomrule
\end{longtable}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SO10BranchingRules}SO(10) Branching Rules}\\
\endfirsthead
\caption[]{SO(10) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SO(10)& $\to$ &SU(5)${\times}$U(1)\\
\midrule
\irrep{10} & = & $(\irrep{5})(2)+(\irrepbar{5})(-2)$\\
\irrep{16} & = & $(\irrep{1})(-5)+(\irrepbar{5})(3)+(\irrep{10})(-1)$\\
\irrep{45} & = & $(\irrep{1})(0)+(\irrep{10})(4)+(\irrepbar{10})(-4)+(\irrep{24})(0)$\\
\irrep{54} & = & $(\irrep{15})(4)+(\irrepbar{15})(-4)+(\irrep{24})(0)$\\
\irrep{120} & = & $(\irrep{5})(2)+(\irrepbar{5})(-2)+(\irrep{10})(-6)+(\irrepbar{10})(6)+(\irrep{45})(2)+(\irrepbar{45})(-2)$\\
\irrep{126} & = & $(\irrep{1})(10)+(\irrep{5})(2)+(\irrepbar{10})(6)+(\irrep{15})(-6)+(\irrepbar{45})(-2)+(\irrep{50})(2)$\\
\irrep{144} & = & $(\irrep{5})(7)+(\irrepbar{5})(3)+(\irrep{10})(-1)+(\irrep{15})(-1)+(\irrep{24})(-5)+(\irrep{40})(-1)+(\irrepbar{45})(3)$\\
\irrep{210} & = & $(\irrep{1})(0)+(\irrep{5})(-8)+(\irrepbar{5})(8)+(\irrep{10})(4)+(\irrepbar{10})(-4)+(\irrep{24})(0)+(\irrep{40})(4)+(\irrepbar{40})(-4)+(\irrep{75})(0)$\\
\irrep[1]{210} & = & $(\irrep{35})(-6)+(\irrepbar{35})(6)+(\irrep{70})(2)+(\irrepbar{70})(-2)$\\
\irrep{320} & = & $(\irrep{5})(2)+(\irrepbar{5})(-2)+(\irrep{40})(-6)+(\irrepbar{40})(6)+(\irrep{45})(2)+(\irrepbar{45})(-2)+(\irrep{70})(2)+(\irrepbar{70})(-2)$\\
\irrep{560} & = & $(\irrep{1})(-5)+(\irrepbar{5})(3)+2(\irrep{10})(-1)+(\irrepbar{10})(-9)+(\irrep{24})(-5)+(\irrep{40})(-1)+(\irrep{45})(7)+(\irrepbar{45})(3)+(\irrepbar{50})(3)+(\irrepbar{70})(3)+(\irrep{75})(-5)+(\irrep{175})(-1)$\\
\irrep{660} & = & $(\irrep[1]{70})(-8)+(\irrepbar[1]{70})(8)+(\irrep{160})(4)+(\irrepbar{160})(-4)+(\irrep{200})(0)$\\
\irrep{672} & = & $(\irrep{1})(15)+(\irrep{5})(7)+(\irrepbar{10})(11)+(\irrep{15})(-1)+(\irrepbar{35})(-9)+(\irrepbar{45})(3)+(\irrep{50})(7)+(\irrep{126})(-5)+(\irrepbar[2]{175})(3)+(\irrep{210})(-1)$\\
\irrep{720} & = & $(\irrep{15})(-1)+(\irrepbar{15})(-9)+(\irrep{24})(-5)+(\irrep{35})(-1)+(\irrep{40})(-1)+(\irrepbar{45})(3)+(\irrep{70})(7)+(\irrepbar{70})(3)+(\irrepbar{105})(3)+(\irrepbar{126})(-5)+(\irrep{175})(-1)$\\
\irrep{770} & = & $(\irrep{1})(0)+(\irrep{10})(4)+(\irrepbar{10})(-4)+(\irrep{24})(0)+(\irrep{50})(-8)+(\irrepbar{50})(8)+(\irrep{75})(0)+(\irrep{175})(4)+(\irrepbar{175})(-4)+(\irrep{200})(0)$\\
\irrep{945} & = & $(\irrep{10})(4)+(\irrepbar{10})(-4)+(\irrep{15})(4)+(\irrepbar{15})(-4)+2(\irrep{24})(0)+(\irrep{40})(4)+(\irrepbar{40})(-4)+(\irrep{45})(-8)+(\irrepbar{45})(8)+(\irrep{75})(0)+(\irrep{126})(0)+(\irrepbar{126})(0)+(\irrep{175})(4)+(\irrepbar{175})(-4)$\\
\irrep{1050} & = & $(\irrep{5})(12)+(\irrepbar{5})(8)+(\irrep{10})(4)+(\irrep{15})(4)+(\irrep{24})(0)+(\irrepbar{35})(-4)+(\irrep{40})(4)+(\irrepbar{40})(-4)+(\irrepbar{45})(8)+(\irrep{70})(-8)+(\irrep{75})(0)+(\irrep{126})(0)+(\irrepbar{175})(-4)+(\irrepbar[1]{175})(0)+(\irrep{210})(4)$\\
\irrep{1200} & = & $(\irrep{5})(7)+(\irrepbar{5})(3)+(\irrep{10})(-1)+(\irrepbar{10})(11)+(\irrep{15})(-1)+(\irrep{24})(-5)+(\irrep{40})(-1)+(\irrepbar{40})(-9)+(\irrep{45})(7)+2(\irrepbar{45})(3)+(\irrep{50})(7)+(\irrep{75})(-5)+(\irrep{126})(-5)+(\irrep{175})(-1)+(\irrep{210})(-1)+(\irrepbar{280})(3)$\\
\irrep{1386} & = & $(\irrep{15})(4)+(\irrepbar{15})(-4)+(\irrep{24})(0)+(\irrep{105})(-8)+(\irrepbar{105})(8)+(\irrep{126})(0)+(\irrepbar{126})(0)+(\irrep{160})(4)+(\irrepbar{160})(-4)+(\irrep{175})(4)+(\irrepbar{175})(-4)+(\irrep{200})(0)$\\
\irrep{1440} & = & $(\irrep{1})(-5)+(\irrep{5})(-13)+(\irrepbar{5})(3)+(\irrep{10})(-1)+(\irrepbar{10})(-9)+(\irrepbar{15})(11)+(\irrep{24})(-5)+(\irrep{40})(-1)+(\irrepbar{40})(-9)+(\irrep{45})(7)+(\irrepbar{50})(3)+(\irrepbar{70})(3)+(\irrep{75})(-5)+(\irrep{105})(7)+(\irrep{175})(-1)+(\irrep[1]{175})(-5)+(\irrepbar{280})(3)+(\irrep{315})(-1)$\\
\irrep{1728} & = & $(\irrep{5})(2)+(\irrepbar{5})(-2)+(\irrep{10})(-6)+(\irrepbar{10})(6)+(\irrep{15})(-6)+(\irrepbar{15})(6)+(\irrep{24})(10)+(\irrep{24})(-10)+(\irrep{40})(-6)+(\irrepbar{40})(6)+2(\irrep{45})(2)+2(\irrepbar{45})(-2)+(\irrep{50})(2)+(\irrepbar{50})(-2)+(\irrep{70})(2)+(\irrepbar{70})(-2)+(\irrep{105})(2)+(\irrepbar{105})(-2)+(\irrep{175})(-6)+(\irrepbar{175})(6)+(\irrep{280})(2)+(\irrepbar{280})(-2)$\\
\irrep{1782} & = & $(\irrep[1]{126})(10)+(\irrepbar[1]{126})(-10)+(\irrep[1]{315})(-6)+(\irrepbar[1]{315})(6)+(\irrep[1]{450})(2)+(\irrepbar[1]{450})(-2)$\\
\irrep{2640} & = & $(\irrep{35})(-1)+(\irrepbar{35})(11)+(\irrep{70})(7)+(\irrepbar{70})(3)+(\irrepbar[1]{70})(3)+(\irrepbar{105})(3)+(\irrepbar{126})(-5)+(\irrep{160})(-1)+(\irrepbar{160})(-9)+(\irrep{175})(-1)+(\irrep{200})(-5)+(\irrepbar{224})(-5)+(\irrep[1]{280})(7)+(\irrep{450})(-1)+(\irrepbar{480})(3)$\\
\irrep{2772} & = & $(\irrep{1})(20)+(\irrep{5})(12)+(\irrepbar{10})(16)+(\irrep{15})(4)+(\irrepbar{35})(-4)+(\irrepbar{45})(8)+(\irrep{50})(12)+(\irrepbar[1]{70})(-12)+(\irrep{126})(0)+(\irrepbar[2]{175})(8)+(\irrep{210})(4)+(\irrep[1]{280})(-8)+(\irrep{490})(4)+(\irrepbar{560})(-4)+(\irrep[1]{700})(0)$\\
\irrep{2970} & = & $(\irrep{5})(2)+(\irrepbar{5})(-2)+(\irrep{10})(-6)+(\irrepbar{10})(6)+(\irrep{40})(-6)+(\irrepbar{40})(6)+2(\irrep{45})(2)+2(\irrepbar{45})(-2)+(\irrep{50})(2)+(\irrepbar{50})(-2)+(\irrep{70})(2)+(\irrepbar{70})(-2)+(\irrep{75})(10)+(\irrep{75})(-10)+(\irrep{175})(-6)+(\irrepbar{175})(6)+(\irrep{210})(-6)+(\irrepbar{210})(6)+(\irrep{280})(2)+(\irrepbar{280})(-2)+(\irrep{480})(2)+(\irrepbar{480})(-2)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\newpage
\toprule
\rowcolor{tableheadcolor}
SO(10)& $\to$ &SU(2)${\times}$SU(2)${\times}$SU(4)\\
\midrule
\irrep{10} & = & $(\irrep{2},\irrep{2},\irrep{1})+(\irrep{1},\irrep{1},\irrep{6})$\\
\irrep{16} & = & $(\irrep{2},\irrep{1},\irrep{4})+(\irrep{1},\irrep{2},\irrepbar{4})$\\
\irrep{45} & = & $(\irrep{3},\irrep{1},\irrep{1})+(\irrep{1},\irrep{3},\irrep{1})+(\irrep{2},\irrep{2},\irrep{6})+(\irrep{1},\irrep{1},\irrep{15})$\\
\irrep{54} & = & $(\irrep{1},\irrep{1},\irrep{1})+(\irrep{3},\irrep{3},\irrep{1})+(\irrep{2},\irrep{2},\irrep{6})+(\irrep{1},\irrep{1},\irrep[1]{20})$\\
\irrep{120} & = & $(\irrep{2},\irrep{2},\irrep{1})+(\irrep{3},\irrep{1},\irrep{6})+(\irrep{1},\irrep{3},\irrep{6})+(\irrep{1},\irrep{1},\irrep{10})+(\irrep{1},\irrep{1},\irrepbar{10})+(\irrep{2},\irrep{2},\irrep{15})$\\
\irrep{126} & = & $(\irrep{1},\irrep{1},\irrep{6})+(\irrep{3},\irrep{1},\irrepbar{10})+(\irrep{1},\irrep{3},\irrep{10})+(\irrep{2},\irrep{2},\irrep{15})$\\
\irrep{144} & = & $(\irrep{2},\irrep{1},\irrep{4})+(\irrep{1},\irrep{2},\irrepbar{4})+(\irrep{2},\irrep{3},\irrep{4})+(\irrep{3},\irrep{2},\irrepbar{4})+(\irrep{2},\irrep{1},\irrep{20})+(\irrep{1},\irrep{2},\irrepbar{20})$\\
\irrep{210} & = & $(\irrep{1},\irrep{1},\irrep{1})+(\irrep{2},\irrep{2},\irrep{6})+(\irrep{2},\irrep{2},\irrep{10})+(\irrep{2},\irrep{2},\irrepbar{10})+(\irrep{1},\irrep{1},\irrep{15})+(\irrep{3},\irrep{1},\irrep{15})+(\irrep{1},\irrep{3},\irrep{15})$\\
\irrep[1]{210} & = & $(\irrep{2},\irrep{2},\irrep{1})+(\irrep{1},\irrep{1},\irrep{6})+(\irrep{4},\irrep{4},\irrep{1})+(\irrep{3},\irrep{3},\irrep{6})+(\irrep{2},\irrep{2},\irrep[1]{20})+(\irrep{1},\irrep{1},\irrep{50})$\\
\irrep{320} & = & $(\irrep{2},\irrep{2},\irrep{1})+(\irrep{4},\irrep{2},\irrep{1})+(\irrep{2},\irrep{4},\irrep{1})+(\irrep{1},\irrep{1},\irrep{6})+(\irrep{3},\irrep{1},\irrep{6})+(\irrep{1},\irrep{3},\irrep{6})+(\irrep{3},\irrep{3},\irrep{6})+(\irrep{2},\irrep{2},\irrep{15})+(\irrep{2},\irrep{2},\irrep[1]{20})+(\irrep{1},\irrep{1},\irrep{64})$\\
\irrep{560} & = & $(\irrep{2},\irrep{1},\irrep{4})+(\irrep{1},\irrep{2},\irrepbar{4})+(\irrep{2},\irrep{3},\irrep{4})+(\irrep{3},\irrep{2},\irrepbar{4})+(\irrep{4},\irrep{1},\irrep{4})+(\irrep{1},\irrep{4},\irrepbar{4})+(\irrep{2},\irrep{1},\irrep{20})+(\irrep{1},\irrep{2},\irrepbar{20})+(\irrep{2},\irrep{3},\irrep{20})+(\irrep{3},\irrep{2},\irrepbar{20})+(\irrep{2},\irrep{1},\irrep{36})+(\irrep{1},\irrep{2},\irrepbar{36})$\\
\irrep{660} & = & $(\irrep{1},\irrep{1},\irrep{1})+(\irrep{3},\irrep{3},\irrep{1})+(\irrep{2},\irrep{2},\irrep{6})+(\irrep{5},\irrep{5},\irrep{1})+(\irrep{4},\irrep{4},\irrep{6})+(\irrep{1},\irrep{1},\irrep[1]{20})+(\irrep{3},\irrep{3},\irrep[1]{20})+(\irrep{2},\irrep{2},\irrep{50})+(\irrep{1},\irrep{1},\irrep{105})$\\
\irrep{672} & = & $(\irrep{2},\irrep{1},\irrep{20})+(\irrep{1},\irrep{2},\irrepbar{20})+(\irrep{4},\irrep{1},\irrep[2]{20})+(\irrep{1},\irrep{4},\irrepbar[2]{20})+(\irrep{2},\irrep{3},\irrep{36})+(\irrep{3},\irrep{2},\irrepbar{36})$\\
\irrep{720} & = & $(\irrep{2},\irrep{1},\irrep{4})+(\irrep{1},\irrep{2},\irrepbar{4})+(\irrep{2},\irrep{3},\irrep{4})+(\irrep{3},\irrep{2},\irrepbar{4})+(\irrep{4},\irrep{3},\irrep{4})+(\irrep{3},\irrep{4},\irrepbar{4})+(\irrep{2},\irrep{1},\irrep{20})+(\irrep{1},\irrep{2},\irrepbar{20})+(\irrep{2},\irrep{3},\irrep{20})+(\irrep{3},\irrep{2},\irrepbar{20})+(\irrep{2},\irrep{1},\irrep{60})+(\irrep{1},\irrep{2},\irrepbar{60})$\\
\irrep{770} & = & $(\irrep{1},\irrep{1},\irrep{1})+(\irrep{3},\irrep{3},\irrep{1})+(\irrep{5},\irrep{1},\irrep{1})+(\irrep{1},\irrep{5},\irrep{1})+(\irrep{2},\irrep{2},\irrep{6})+(\irrep{4},\irrep{2},\irrep{6})+(\irrep{2},\irrep{4},\irrep{6})+(\irrep{3},\irrep{1},\irrep{15})+(\irrep{1},\irrep{3},\irrep{15})+(\irrep{1},\irrep{1},\irrep[1]{20})+(\irrep{3},\irrep{3},\irrep[1]{20})+(\irrep{2},\irrep{2},\irrep{64})+(\irrep{1},\irrep{1},\irrep{84})$\\
\irrep{945} & = & $(\irrep{3},\irrep{1},\irrep{1})+(\irrep{1},\irrep{3},\irrep{1})+(\irrep{3},\irrep{3},\irrep{1})+2(\irrep{2},\irrep{2},\irrep{6})+(\irrep{4},\irrep{2},\irrep{6})+(\irrep{2},\irrep{4},\irrep{6})+(\irrep{2},\irrep{2},\irrep{10})+(\irrep{2},\irrep{2},\irrepbar{10})+(\irrep{1},\irrep{1},\irrep{15})+(\irrep{3},\irrep{1},\irrep{15})+(\irrep{1},\irrep{3},\irrep{15})+(\irrep{3},\irrep{3},\irrep{15})+(\irrep{3},\irrep{1},\irrep[1]{20})+(\irrep{1},\irrep{3},\irrep[1]{20})+(\irrep{1},\irrep{1},\irrep{45})+(\irrep{1},\irrep{1},\irrepbar{45})+(\irrep{2},\irrep{2},\irrep{64})$\\
\irrep{1050} & = & $(\irrep{2},\irrep{2},\irrep{6})+(\irrep{2},\irrep{2},\irrep{10})+(\irrep{2},\irrep{2},\irrepbar{10})+(\irrep{4},\irrep{2},\irrepbar{10})+(\irrep{2},\irrep{4},\irrep{10})+(\irrep{1},\irrep{1},\irrep{15})+(\irrep{3},\irrep{1},\irrep{15})+(\irrep{1},\irrep{3},\irrep{15})+(\irrep{3},\irrep{3},\irrep{15})+(\irrep{1},\irrep{1},\irrep[1]{20})+(\irrep{3},\irrep{1},\irrepbar{45})+(\irrep{1},\irrep{3},\irrep{45})+(\irrep{2},\irrep{2},\irrep{64})$\\
\irrep{1200} & = & $(\irrep{2},\irrep{1},\irrep{4})+(\irrep{1},\irrep{2},\irrepbar{4})+(\irrep{2},\irrep{3},\irrep{4})+(\irrep{3},\irrep{2},\irrepbar{4})+(\irrep{2},\irrep{1},\irrep{20})+(\irrep{1},\irrep{2},\irrepbar{20})+(\irrep{2},\irrep{1},\irrep[2]{20})+(\irrep{1},\irrep{2},\irrepbar[2]{20})+(\irrep{2},\irrep{3},\irrep{20})+(\irrep{3},\irrep{2},\irrepbar{20})+(\irrep{4},\irrep{1},\irrep{20})+(\irrep{1},\irrep{4},\irrepbar{20})+(\irrep{2},\irrep{1},\irrep{36})+(\irrep{1},\irrep{2},\irrepbar{36})+(\irrep{2},\irrep{3},\irrep{36})+(\irrep{3},\irrep{2},\irrepbar{36})$\\
\irrep{1386} & = & $(\irrep{3},\irrep{1},\irrep{1})+(\irrep{1},\irrep{3},\irrep{1})+(\irrep{3},\irrep{3},\irrep{1})+(\irrep{5},\irrep{3},\irrep{1})+(\irrep{3},\irrep{5},\irrep{1})+2(\irrep{2},\irrep{2},\irrep{6})+(\irrep{4},\irrep{2},\irrep{6})+(\irrep{2},\irrep{4},\irrep{6})+(\irrep{4},\irrep{4},\irrep{6})+(\irrep{1},\irrep{1},\irrep{15})+(\irrep{3},\irrep{3},\irrep{15})+(\irrep{1},\irrep{1},\irrep[1]{20})+(\irrep{3},\irrep{1},\irrep[1]{20})+(\irrep{1},\irrep{3},\irrep[1]{20})+(\irrep{3},\irrep{3},\irrep[1]{20})+(\irrep{2},\irrep{2},\irrep{50})+(\irrep{2},\irrep{2},\irrep{64})+(\irrep{1},\irrep{1},\irrep{175})$\\
\irrep{1440} & = & $(\irrep{2},\irrep{1},\irrep{4})+(\irrep{1},\irrep{2},\irrepbar{4})+(\irrep{2},\irrep{1},\irrep{20})+(\irrep{1},\irrep{2},\irrepbar{20})+(\irrep{2},\irrep{3},\irrep{20})+(\irrep{3},\irrep{2},\irrepbar{20})+(\irrep{2},\irrep{3},\irrep[2]{20})+(\irrep{3},\irrep{2},\irrepbar[2]{20})+(\irrep{2},\irrep{1},\irrep{36})+(\irrep{1},\irrep{2},\irrepbar{36})+(\irrep{2},\irrep{3},\irrep{36})+(\irrep{3},\irrep{2},\irrepbar{36})+(\irrep{4},\irrep{1},\irrep{36})+(\irrep{1},\irrep{4},\irrepbar{36})$\\
\irrep{1728} & = & $(\irrep{2},\irrep{2},\irrep{1})+(\irrep{1},\irrep{1},\irrep{6})+(\irrep{3},\irrep{1},\irrep{6})+(\irrep{1},\irrep{3},\irrep{6})+(\irrep{1},\irrep{1},\irrep{10})+(\irrep{1},\irrep{1},\irrepbar{10})+(\irrep{3},\irrep{3},\irrep{6})+(\irrep{3},\irrep{1},\irrep{10})+(\irrep{3},\irrep{1},\irrepbar{10})+(\irrep{1},\irrep{3},\irrep{10})+(\irrep{1},\irrep{3},\irrepbar{10})+(\irrep{3},\irrep{3},\irrep{10})+(\irrep{3},\irrep{3},\irrepbar{10})+3(\irrep{2},\irrep{2},\irrep{15})+(\irrep{4},\irrep{2},\irrep{15})+(\irrep{2},\irrep{4},\irrep{15})+(\irrep{2},\irrep{2},\irrep[1]{20})+(\irrep{2},\irrep{2},\irrep{45})+(\irrep{2},\irrep{2},\irrepbar{45})+(\irrep{1},\irrep{1},\irrep{64})+(\irrep{3},\irrep{1},\irrep{64})+(\irrep{1},\irrep{3},\irrep{64})$\\
\irrep{1782} & = & $(\irrep{2},\irrep{2},\irrep{1})+(\irrep{1},\irrep{1},\irrep{6})+(\irrep{4},\irrep{4},\irrep{1})+(\irrep{3},\irrep{3},\irrep{6})+(\irrep{6},\irrep{6},\irrep{1})+(\irrep{5},\irrep{5},\irrep{6})+(\irrep{2},\irrep{2},\irrep[1]{20})+(\irrep{4},\irrep{4},\irrep[1]{20})+(\irrep{1},\irrep{1},\irrep{50})+(\irrep{3},\irrep{3},\irrep{50})+(\irrep{2},\irrep{2},\irrep{105})+(\irrep{1},\irrep{1},\irrep{196})$\\
\irrep{2640} & = & $(\irrep{2},\irrep{1},\irrep{4})+(\irrep{1},\irrep{2},\irrepbar{4})+(\irrep{2},\irrep{3},\irrep{4})+(\irrep{3},\irrep{2},\irrepbar{4})+(\irrep{4},\irrep{3},\irrep{4})+(\irrep{3},\irrep{4},\irrepbar{4})+(\irrep{4},\irrep{5},\irrep{4})+(\irrep{5},\irrep{4},\irrepbar{4})+(\irrep{2},\irrep{1},\irrep{20})+(\irrep{1},\irrep{2},\irrepbar{20})+(\irrep{2},\irrep{3},\irrep{20})+(\irrep{3},\irrep{2},\irrepbar{20})+(\irrep{4},\irrep{3},\irrep{20})+(\irrep{3},\irrep{4},\irrepbar{20})+(\irrep{2},\irrep{1},\irrep{60})+(\irrep{1},\irrep{2},\irrepbar{60})+(\irrep{2},\irrep{3},\irrep{60})+(\irrep{3},\irrep{2},\irrepbar{60})+(\irrep{2},\irrep{1},\irrep[1]{140})+(\irrep{1},\irrep{2},\irrepbar[1]{140})$\\
\irrep{2772} & = & $(\irrep{1},\irrep{1},\irrep[1]{20})+(\irrep{5},\irrep{1},\irrepbar{35})+(\irrep{1},\irrep{5},\irrep{35})+(\irrep{3},\irrep{1},\irrepbar{45})+(\irrep{1},\irrep{3},\irrep{45})+(\irrep{2},\irrep{2},\irrep{64})+(\irrep{4},\irrep{2},\irrepbar{70})+(\irrep{2},\irrep{4},\irrep{70})+(\irrep{3},\irrep{3},\irrep{84})$\\
\irrep{2970} & = & $(\irrep{2},\irrep{2},\irrep{1})+(\irrep{4},\irrep{2},\irrep{1})+(\irrep{2},\irrep{4},\irrep{1})+(\irrep{1},\irrep{1},\irrep{6})+(\irrep{3},\irrep{1},\irrep{6})+(\irrep{1},\irrep{3},\irrep{6})+2(\irrep{3},\irrep{3},\irrep{6})+(\irrep{5},\irrep{1},\irrep{6})+(\irrep{1},\irrep{5},\irrep{6})+(\irrep{3},\irrep{1},\irrep{10})+(\irrep{3},\irrep{1},\irrepbar{10})+(\irrep{1},\irrep{3},\irrep{10})+(\irrep{1},\irrep{3},\irrepbar{10})+2(\irrep{2},\irrep{2},\irrep{15})+(\irrep{4},\irrep{2},\irrep{15})+(\irrep{2},\irrep{4},\irrep{15})+(\irrep{2},\irrep{2},\irrep[1]{20})+(\irrep{4},\irrep{2},\irrep[1]{20})+(\irrep{2},\irrep{4},\irrep[1]{20})+(\irrep{2},\irrep{2},\irrep{45})+(\irrep{2},\irrep{2},\irrepbar{45})+(\irrep{1},\irrep{1},\irrep{64})+(\irrep{3},\irrep{1},\irrep{64})+(\irrep{1},\irrep{3},\irrep{64})+(\irrep{3},\irrep{3},\irrep{64})+(\irrep{1},\irrep{1},\irrep{70})+(\irrep{1},\irrep{1},\irrepbar{70})+(\irrep{2},\irrep{2},\irrep{84})$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\newpage
\toprule
\rowcolor{tableheadcolor}
SO(10)& $\to$ &SO(9)\\
\midrule
\irrep{10} & = & $\irrep{1}+\irrep{9}$\\
\irrep{16} & = & $\irrep{16}$\\
\irrep{45} & = & $\irrep{9}+\irrep{36}$\\
\irrep{54} & = & $\irrep{1}+\irrep{9}+\irrep{44}$\\
\irrep{120} & = & $\irrep{36}+\irrep{84}$\\
\irrep{126} & = & $\irrep{126}$\\
\irrep{144} & = & $\irrep{16}+\irrep{128}$\\
\irrep{210} & = & $\irrep{84}+\irrep{126}$\\
\irrep[1]{210} & = & $\irrep{1}+\irrep{9}+\irrep{44}+\irrep{156}$\\
\irrep{320} & = & $\irrep{9}+\irrep{36}+\irrep{44}+\irrep{231}$\\
\irrep{560} & = & $\irrep{128}+\irrep{432}$\\
\irrep{660} & = & $\irrep{1}+\irrep{9}+\irrep{44}+\irrep{156}+\irrep{450}$\\
\irrep{672} & = & $\irrep{672}$\\
\irrep{720} & = & $\irrep{16}+\irrep{128}+\irrep{576}$\\
\irrep{770} & = & $\irrep{44}+\irrep{231}+\irrep{495}$\\
\irrep{945} & = & $\irrep{36}+\irrep{84}+\irrep{231}+\irrep{594}$\\
\irrep{1050} & = & $\irrep{126}+\irrep{924}$\\
\irrep{1200} & = & $\irrep{432}+\irrep{768}$\\
\irrep{1386} & = & $\irrep{9}+\irrep{36}+\irrep{44}+\irrep{156}+\irrep{231}+\irrep{910}$\\
\irrep{1440} & = & $\irrep{672}+\irrep{768}$\\
\irrep{1728} & = & $\irrep{84}+\irrep{126}+\irrep{594}+\irrep{924}$\\
\irrep{1782} & = & $\irrep{1}+\irrep{9}+\irrep{44}+\irrep{156}+\irrep{450}+\irrep{1122}$\\
\irrep{2640} & = & $\irrep{16}+\irrep{128}+\irrep{576}+\irrep{1920}$\\
\irrep{2772} & = & $\irrep[1]{2772}$\\
\irrep{2970} & = & $\irrep{231}+\irrep{495}+\irrep{594}+\irrep{1650}$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
SO(10)& $\to$ &SU(2)${\times}$SO(7)\\
\midrule
\irrep{10} & = & $(\irrep{3},\irrep{1})+(\irrep{1},\irrep{7})$\\
\irrep{16} & = & $(\irrep{2},\irrep{8})$\\
\irrep{45} & = & $(\irrep{3},\irrep{1})+(\irrep{3},\irrep{7})+(\irrep{1},\irrep{21})$\\
\irrep{54} & = & $(\irrep{1},\irrep{1})+(\irrep{5},\irrep{1})+(\irrep{3},\irrep{7})+(\irrep{1},\irrep{27})$\\
\irrep{120} & = & $(\irrep{1},\irrep{1})+(\irrep{3},\irrep{7})+(\irrep{3},\irrep{21})+(\irrep{1},\irrep{35})$\\
\irrep{126} & = & $(\irrep{1},\irrep{21})+(\irrep{3},\irrep{35})$\\
\irrep{144} & = & $(\irrep{2},\irrep{8})+(\irrep{4},\irrep{8})+(\irrep{2},\irrep{48})$\\
\irrep{210} & = & $(\irrep{1},\irrep{7})+(\irrep{3},\irrep{21})+(\irrep{1},\irrep{35})+(\irrep{3},\irrep{35})$\\
\irrep[1]{210} & = & $(\irrep{3},\irrep{1})+(\irrep{1},\irrep{7})+(\irrep{7},\irrep{1})+(\irrep{5},\irrep{7})+(\irrep{3},\irrep{27})+(\irrep{1},\irrep{77})$\\
\irrep{320} & = & $(\irrep{3},\irrep{1})+(\irrep{5},\irrep{1})+(\irrep{1},\irrep{7})+(\irrep{3},\irrep{7})+(\irrep{5},\irrep{7})+(\irrep{3},\irrep{21})+(\irrep{3},\irrep{27})+(\irrep{1},\irrep{105})$\\
\irrep{560} & = & $(\irrep{2},\irrep{8})+(\irrep{4},\irrep{8})+(\irrep{2},\irrep{48})+(\irrep{4},\irrep{48})+(\irrep{2},\irrep{112})$\\
\irrep{660} & = & $(\irrep{1},\irrep{1})+(\irrep{5},\irrep{1})+(\irrep{3},\irrep{7})+(\irrep{9},\irrep{1})+(\irrep{7},\irrep{7})+(\irrep{1},\irrep{27})+(\irrep{5},\irrep{27})+(\irrep{3},\irrep{77})+(\irrep{1},\irrep{182})$\\
\irrep{672} & = & $(\irrep{2},\irrep{112})+(\irrep{4},\irrep[1]{112})$\\
\irrep{720} & = & $(\irrep{2},\irrep{8})+(\irrep{4},\irrep{8})+(\irrep{6},\irrep{8})+(\irrep{2},\irrep{48})+(\irrep{4},\irrep{48})+(\irrep{2},\irrep{168})$\\
\irrep{770} & = & $(\irrep{1},\irrep{1})+(\irrep{5},\irrep{1})+(\irrep{3},\irrep{7})+(\irrep{5},\irrep{7})+(\irrep{3},\irrep{21})+(\irrep{1},\irrep{27})+(\irrep{5},\irrep{27})+(\irrep{3},\irrep{105})+(\irrep{1},\irrep[1]{168})$\\
\irrep{945} & = & $(\irrep{3},\irrep{1})+(\irrep{1},\irrep{7})+(\irrep{3},\irrep{7})+(\irrep{5},\irrep{7})+(\irrep{1},\irrep{21})+(\irrep{3},\irrep{21})+(\irrep{5},\irrep{21})+(\irrep{3},\irrep{27})+(\irrep{3},\irrep{35})+(\irrep{3},\irrep{105})+(\irrep{1},\irrep{189})$\\
\irrep{1050} & = & $(\irrep{3},\irrep{21})+(\irrep{1},\irrep{35})+(\irrep{3},\irrep{35})+(\irrep{5},\irrep{35})+(\irrep{1},\irrep{105})+(\irrep{3},\irrep{189})$\\
\irrep{1200} & = & $(\irrep{2},\irrep{8})+(\irrep{2},\irrep{48})+(\irrep{4},\irrep{48})+(\irrep{2},\irrep{112})+(\irrep{2},\irrep[1]{112})+(\irrep{4},\irrep{112})$\\
\irrep{1386} & = & $(\irrep{3},\irrep{1})+(\irrep{5},\irrep{1})+(\irrep{7},\irrep{1})+2(\irrep{3},\irrep{7})+(\irrep{5},\irrep{7})+(\irrep{7},\irrep{7})+(\irrep{1},\irrep{21})+(\irrep{5},\irrep{21})+(\irrep{1},\irrep{27})+(\irrep{3},\irrep{27})+(\irrep{5},\irrep{27})+(\irrep{3},\irrep{77})+(\irrep{3},\irrep{105})+(\irrep{1},\irrep{330})$\\
\irrep{1440} & = & $(\irrep{2},\irrep{48})+(\irrep{2},\irrep{112})+(\irrep{2},\irrep[1]{112})+(\irrep{4},\irrep{112})+(\irrep{4},\irrep[1]{112})$\\
\irrep{1728} & = & $(\irrep{3},\irrep{7})+(\irrep{1},\irrep{21})+(\irrep{3},\irrep{21})+(\irrep{5},\irrep{21})+(\irrep{1},\irrep{27})+(\irrep{1},\irrep{35})+2(\irrep{3},\irrep{35})+(\irrep{5},\irrep{35})+(\irrep{3},\irrep{105})+(\irrep{1},\irrep{189})+(\irrep{3},\irrep{189})$\\
\irrep{1782} & = & $(\irrep{3},\irrep{1})+(\irrep{1},\irrep{7})+(\irrep{7},\irrep{1})+(\irrep{5},\irrep{7})+(\irrep{11},\irrep{1})+(\irrep{9},\irrep{7})+(\irrep{3},\irrep{27})+(\irrep{7},\irrep{27})+(\irrep{1},\irrep{77})+(\irrep{5},\irrep{77})+(\irrep{3},\irrep{182})+(\irrep{1},\irrep[1]{378})$\\
\irrep{2640} & = & $(\irrep{2},\irrep{8})+(\irrep{4},\irrep{8})+(\irrep{6},\irrep{8})+(\irrep{8},\irrep{8})+(\irrep{2},\irrep{48})+(\irrep{4},\irrep{48})+(\irrep{6},\irrep{48})+(\irrep{2},\irrep{168})+(\irrep{4},\irrep{168})+(\irrep{2},\irrep{448})$\\
\irrep{2772} & = & $(\irrep{1},\irrep[1]{168})+(\irrep{5},\irrep{294})+(\irrep{3},\irrep{378})$\\
\irrep{2970} & = & $(\irrep{3},\irrep{1})+(\irrep{1},\irrep{7})+(\irrep{3},\irrep{7})+(\irrep{5},\irrep{7})+(\irrep{1},\irrep{21})+(\irrep{3},\irrep{21})+(\irrep{5},\irrep{21})+(\irrep{3},\irrep{27})+(\irrep{5},\irrep{27})+(\irrep{3},\irrep{35})+(\irrep{1},\irrep{105})+(\irrep{3},\irrep{105})+(\irrep{5},\irrep{105})+(\irrep{3},\irrep[1]{168})+(\irrep{3},\irrep{189})+(\irrep{1},\irrep{378})$\\
\bottomrule
\end{longtable}
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SO14BranchingRules}SO(14) Branching Rules}\\
\endfirsthead
\caption[]{SO(14) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SO(14)& $\to$ &SU(2)${\times}$SU(2)${\times}$SO(10)\\
\midrule
\irrep{14} & = & $(\irrep{2},\irrep{2},\irrep{1})+(\irrep{1},\irrep{1},\irrep{10})$\\
\irrep{64} & = & $(\irrep{2},\irrep{1},\irrep{16})+(\irrep{1},\irrep{2},\irrepbar{16})$\\
\irrep{91} & = & $(\irrep{3},\irrep{1},\irrep{1})+(\irrep{1},\irrep{3},\irrep{1})+(\irrep{2},\irrep{2},\irrep{10})+(\irrep{1},\irrep{1},\irrep{45})$\\
\irrep{104} & = & $(\irrep{1},\irrep{1},\irrep{1})+(\irrep{3},\irrep{3},\irrep{1})+(\irrep{2},\irrep{2},\irrep{10})+(\irrep{1},\irrep{1},\irrep{54})$\\
\irrep{364} & = & $(\irrep{2},\irrep{2},\irrep{1})+(\irrep{3},\irrep{1},\irrep{10})+(\irrep{1},\irrep{3},\irrep{10})+(\irrep{2},\irrep{2},\irrep{45})+(\irrep{1},\irrep{1},\irrep{120})$\\
\irrep{546} & = & $(\irrep{2},\irrep{2},\irrep{1})+(\irrep{4},\irrep{4},\irrep{1})+(\irrep{1},\irrep{1},\irrep{10})+(\irrep{3},\irrep{3},\irrep{10})+(\irrep{2},\irrep{2},\irrep{54})+(\irrep{1},\irrep{1},\irrep[1]{210})$\\
\irrep{832} & = & $(\irrep{2},\irrep{1},\irrep{16})+(\irrep{1},\irrep{2},\irrepbar{16})+(\irrep{2},\irrep{3},\irrep{16})+(\irrep{3},\irrep{2},\irrepbar{16})+(\irrep{2},\irrep{1},\irrep{144})+(\irrep{1},\irrep{2},\irrepbar{144})$\\
\irrep{896} & = & $(\irrep{2},\irrep{2},\irrep{1})+(\irrep{4},\irrep{2},\irrep{1})+(\irrep{2},\irrep{4},\irrep{1})+(\irrep{1},\irrep{1},\irrep{10})+(\irrep{3},\irrep{1},\irrep{10})+(\irrep{1},\irrep{3},\irrep{10})+(\irrep{3},\irrep{3},\irrep{10})+(\irrep{2},\irrep{2},\irrep{45})+(\irrep{2},\irrep{2},\irrep{54})+(\irrep{1},\irrep{1},\irrep{320})$\\
\irrep{1001} & = & $(\irrep{1},\irrep{1},\irrep{1})+(\irrep{2},\irrep{2},\irrep{10})+(\irrep{3},\irrep{1},\irrep{45})+(\irrep{1},\irrep{3},\irrep{45})+(\irrep{2},\irrep{2},\irrep{120})+(\irrep{1},\irrep{1},\irrep{210})$\\
\irrep{1716} & = & $(\irrep{1},\irrep{1},\irrep{120})+(\irrep{3},\irrep{1},\irrepbar{126})+(\irrep{1},\irrep{3},\irrep{126})+(\irrep{2},\irrep{2},\irrep{210})$\\
\irrep{2002} & = & $(\irrep{1},\irrep{1},\irrep{10})+(\irrep{2},\irrep{2},\irrep{45})+(\irrep{3},\irrep{1},\irrep{120})+(\irrep{1},\irrep{3},\irrep{120})+(\irrep{1},\irrep{1},\irrep{126})+(\irrep{1},\irrep{1},\irrepbar{126})+(\irrep{2},\irrep{2},\irrep{210})$\\
\irrep{2275} & = & $(\irrep{1},\irrep{1},\irrep{1})+(\irrep{3},\irrep{3},\irrep{1})+(\irrep{5},\irrep{5},\irrep{1})+(\irrep{2},\irrep{2},\irrep{10})+(\irrep{4},\irrep{4},\irrep{10})+(\irrep{1},\irrep{1},\irrep{54})+(\irrep{3},\irrep{3},\irrep{54})+(\irrep{2},\irrep{2},\irrep[1]{210})+(\irrep{1},\irrep{1},\irrep{660})$\\
\irrep{3003} & = & $(\irrep{1},\irrep{1},\irrep{45})+(\irrep{2},\irrep{2},\irrep{120})+(\irrep{2},\irrep{2},\irrep{126})+(\irrep{2},\irrep{2},\irrepbar{126})+(\irrep{1},\irrep{1},\irrep{210})+(\irrep{3},\irrep{1},\irrep{210})+(\irrep{1},\irrep{3},\irrep{210})$\\
\irrep{3080} & = & $(\irrep{1},\irrep{1},\irrep{1})+(\irrep{3},\irrep{3},\irrep{1})+(\irrep{5},\irrep{1},\irrep{1})+(\irrep{1},\irrep{5},\irrep{1})+(\irrep{2},\irrep{2},\irrep{10})+(\irrep{4},\irrep{2},\irrep{10})+(\irrep{2},\irrep{4},\irrep{10})+(\irrep{3},\irrep{1},\irrep{45})+(\irrep{1},\irrep{3},\irrep{45})+(\irrep{1},\irrep{1},\irrep{54})+(\irrep{3},\irrep{3},\irrep{54})+(\irrep{2},\irrep{2},\irrep{320})+(\irrep{1},\irrep{1},\irrep{770})$\\
\irrep{4004} & = & $(\irrep{3},\irrep{1},\irrep{1})+(\irrep{1},\irrep{3},\irrep{1})+(\irrep{3},\irrep{3},\irrep{1})+2(\irrep{2},\irrep{2},\irrep{10})+(\irrep{4},\irrep{2},\irrep{10})+(\irrep{2},\irrep{4},\irrep{10})+(\irrep{1},\irrep{1},\irrep{45})+(\irrep{3},\irrep{1},\irrep{45})+(\irrep{1},\irrep{3},\irrep{45})+(\irrep{3},\irrep{3},\irrep{45})+(\irrep{3},\irrep{1},\irrep{54})+(\irrep{1},\irrep{3},\irrep{54})+(\irrep{2},\irrep{2},\irrep{120})+(\irrep{2},\irrep{2},\irrep{320})+(\irrep{1},\irrep{1},\irrep{945})$\\
\irrep{4928} & = & $(\irrep{2},\irrep{1},\irrep{16})+(\irrep{1},\irrep{2},\irrepbar{16})+(\irrep{2},\irrep{3},\irrep{16})+(\irrep{3},\irrep{2},\irrepbar{16})+(\irrep{4},\irrep{1},\irrep{16})+(\irrep{1},\irrep{4},\irrepbar{16})+(\irrep{2},\irrep{1},\irrep{144})+(\irrep{1},\irrep{2},\irrepbar{144})+(\irrep{2},\irrep{3},\irrep{144})+(\irrep{3},\irrep{2},\irrepbar{144})+(\irrep{2},\irrep{1},\irrep{560})+(\irrep{1},\irrep{2},\irrepbar{560})$\\
\irrep{5265} & = & $(\irrep{3},\irrep{1},\irrep{1})+(\irrep{1},\irrep{3},\irrep{1})+(\irrep{3},\irrep{3},\irrep{1})+(\irrep{5},\irrep{3},\irrep{1})+(\irrep{3},\irrep{5},\irrep{1})+2(\irrep{2},\irrep{2},\irrep{10})+(\irrep{4},\irrep{2},\irrep{10})+(\irrep{2},\irrep{4},\irrep{10})+(\irrep{4},\irrep{4},\irrep{10})+(\irrep{1},\irrep{1},\irrep{45})+(\irrep{3},\irrep{3},\irrep{45})+(\irrep{1},\irrep{1},\irrep{54})+(\irrep{3},\irrep{1},\irrep{54})+(\irrep{1},\irrep{3},\irrep{54})+(\irrep{3},\irrep{3},\irrep{54})+(\irrep{2},\irrep{2},\irrep[1]{210})+(\irrep{2},\irrep{2},\irrep{320})+(\irrep{1},\irrep{1},\irrep{1386})$\\
\irrep{5824} & = & $(\irrep{2},\irrep{1},\irrep{16})+(\irrep{1},\irrep{2},\irrepbar{16})+(\irrep{2},\irrep{3},\irrep{16})+(\irrep{3},\irrep{2},\irrepbar{16})+(\irrep{4},\irrep{3},\irrep{16})+(\irrep{3},\irrep{4},\irrepbar{16})+(\irrep{2},\irrep{1},\irrep{144})+(\irrep{1},\irrep{2},\irrepbar{144})+(\irrep{2},\irrep{3},\irrep{144})+(\irrep{3},\irrep{2},\irrepbar{144})+(\irrep{2},\irrep{1},\irrep{720})+(\irrep{1},\irrep{2},\irrepbar{720})$\\
\bottomrule
\end{longtable}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SO18BranchingRules}SO(18) Branching Rules}\\
\endfirsthead
\caption[]{SO(18) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SO(18)& $\to$ &SO(8)${\times}$SO(10)\\
\midrule
\irrep{18} & = & $(\irrepsub{8}{v},\irrep{1})+(\irrep{1},\irrep{10})$\\
\irrep{153} & = & $(\irrepsub{8}{v},\irrep{10})+(\irrep{28},\irrep{1})+(\irrep{1},\irrep{45})$\\
\irrep{170} & = & $(\irrep{1},\irrep{1})+(\irrepsub{8}{v},\irrep{10})+(\irrepsub{35}{v},\irrep{1})+(\irrep{1},\irrep{54})$\\
\irrep{256} & = & $(\irrepsub{8}{s},\irrepbar{16})+(\irrepsub{8}{c},\irrep{16})$\\
\irrep{816} & = & $(\irrep{28},\irrep{10})+(\irrepsub{8}{v},\irrep{45})+(\irrepsub{56}{v},\irrep{1})+(\irrep{1},\irrep{120})$\\
\irrep{1122} & = & $(\irrepsub{8}{v},\irrep{1})+(\irrep{1},\irrep{10})+(\irrepsub{35}{v},\irrep{10})+(\irrepsub{8}{v},\irrep{54})+(\irrepsub{112}{v},\irrep{1})+(\irrep{1},\irrep[1]{210})$\\
\irrep{1920} & = & $(\irrepsub{8}{v},\irrep{1})+(\irrep{1},\irrep{10})+(\irrep{28},\irrep{10})+(\irrepsub{35}{v},\irrep{10})+(\irrepsub{8}{v},\irrep{45})+(\irrepsub{8}{v},\irrep{54})+(\irrepsub{160}{v},\irrep{1})+(\irrep{1},\irrep{320})$\\
\irrep{3060} & = & $(\irrepsub{35}{c},\irrep{1})+(\irrepsub{35}{s},\irrep{1})+(\irrepsub{56}{v},\irrep{10})+(\irrep{28},\irrep{45})+(\irrepsub{8}{v},\irrep{120})+(\irrep{1},\irrep{210})$\\
\irrep{4352} & = & $(\irrepsub{8}{s},\irrepbar{16})+(\irrepsub{8}{c},\irrep{16})+(\irrepsub{56}{s},\irrepbar{16})+(\irrepsub{56}{c},\irrep{16})+(\irrepsub{8}{s},\irrepbar{144})+(\irrepsub{8}{c},\irrep{144})$\\
\bottomrule
\end{longtable}
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SO22BranchingRules}SO(22) Branching Rules}\\
\endfirsthead
\caption[]{SO(22) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SO(22)& $\to$ &SO(12)${\times}$SO(10)\\
\midrule
\irrep{22} & = & $(\irrep{1},\irrep{10})+(\irrep{12},\irrep{1})$\\
\irrep{231} & = & $(\irrep{12},\irrep{10})+(\irrep{1},\irrep{45})+(\irrep{66},\irrep{1})$\\
\irrep{252} & = & $(\irrep{1},\irrep{1})+(\irrep{12},\irrep{10})+(\irrep{1},\irrep{54})+(\irrep{77},\irrep{1})$\\
\irrep{1024} & = & $(\irrepbar{32},\irrep{16})+(\irrep{32},\irrepbar{16})$\\
\irrep{1540} & = & $(\irrep{12},\irrep{45})+(\irrep{66},\irrep{10})+(\irrep{1},\irrep{120})+(\irrep{220},\irrep{1})$\\
\irrep{3520} & = & $(\irrep{1},\irrep{10})+(\irrep{12},\irrep{1})+(\irrep{12},\irrep{45})+(\irrep{12},\irrep{54})+(\irrep{66},\irrep{10})+(\irrep{77},\irrep{10})+(\irrep{1},\irrep{320})+(\irrep{560},\irrep{1})$\\
\irrep{7315} & = & $(\irrep{66},\irrep{45})+(\irrep{12},\irrep{120})+(\irrep{1},\irrep{210})+(\irrep{220},\irrep{10})+(\irrep{495},\irrep{1})$\\
\bottomrule
\end{longtable}
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:SO26BranchingRules}SO(26) Branching Rules}\\
\endfirsthead
\caption[]{SO(26) Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
SO(26)& $\to$ &SO(16)${\times}$SO(10)\\
\midrule
\irrep{26} & = & $(\irrep{1},\irrep{10})+(\irrep{16},\irrep{1})$\\
\irrep{325} & = & $(\irrep{16},\irrep{10})+(\irrep{1},\irrep{45})+(\irrep{120},\irrep{1})$\\
\irrep{2600} & = & $(\irrep{16},\irrep{45})+(\irrep{1},\irrep{120})+(\irrep{120},\irrep{10})+(\irrep{560},\irrep{1})$\\
\irrep{4096} & = & $(\irrep{128},\irrepbar{16})+(\irrepbar{128},\irrep{16})$\\
\irrep{5824} & = & $(\irrep{1},\irrep{10})+(\irrep{16},\irrep{1})+(\irrep{16},\irrep{45})+(\irrep{16},\irrep{54})+(\irrep{120},\irrep{10})+(\irrep{135},\irrep{10})+(\irrep{1},\irrep{320})+(\irrep{1344},\irrep{1})$\\
\irrep{14950} & = & $(\irrep{16},\irrep{120})+(\irrep{120},\irrep{45})+(\irrep{1},\irrep{210})+(\irrep{560},\irrep{10})+(\irrep{1820},\irrep{1})$\\
\irrep{52325} & = & $(\irrep{16},\irrep{10})+(\irrep{1},\irrep{45})+(\irrep{120},\irrep{1})+(\irrep{16},\irrep{120})+(\irrep{120},\irrep{45})+(\irrep{120},\irrep{54})+(\irrep{135},\irrep{45})+(\irrep{16},\irrep{320})+(\irrep{560},\irrep{10})+(\irrep{1},\irrep{945})+(\irrep{1344},\irrep{10})+(\irrep{7020},\irrep{1})$\\
\irrep{65780} & = & $(\irrep{1},\irrep{126})+(\irrep{1},\irrepbar{126})+(\irrep{16},\irrep{210})+(\irrep{120},\irrep{120})+(\irrep{560},\irrep{45})+(\irrep{1820},\irrep{10})+(\irrep{4368},\irrep{1})$\\
\bottomrule
\end{longtable}
\newpage
\subsubsection{Exceptional Algebras}
{\setlength\extrarowheight{1.1pt}
\enlargethispage{15pt}
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:E6BranchingRules}\E6 Branching Rules}\\
\endfirsthead
\caption[]{\E6 Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
\E6& $\to$ &SO(10)${\times}$U(1)\\
\midrule
\irrep{27} & = & $(\irrep{1})(-4)+(\irrep{10})(2)+(\irrep{16})(-1)$\\
\irrep{78} & = & $(\irrep{1})(0)+(\irrep{16})(3)+(\irrepbar{16})(-3)+(\irrep{45})(0)$\\
\irrep{351} & = & $(\irrep{10})(2)+(\irrepbar{16})(5)+(\irrep{16})(-1)+(\irrep{45})(-4)+(\irrep{120})(2)+(\irrep{144})(-1)$\\
\irrep[1]{351} & = & $(\irrep{1})(8)+(\irrep{10})(2)+(\irrepbar{16})(5)+(\irrep{54})(-4)+(\irrep{126})(2)+(\irrep{144})(-1)$\\
\irrep{650} & = & $(\irrep{1})(0)+(\irrep{10})(6)+(\irrep{10})(-6)+(\irrep{16})(3)+(\irrepbar{16})(-3)+(\irrep{45})(0)+(\irrep{54})(0)+(\irrep{144})(3)+(\irrepbar{144})(-3)+(\irrep{210})(0)$\\
\irrep{1728} & = & $(\irrep{1})(-4)+(\irrep{10})(2)+2(\irrep{16})(-1)+(\irrepbar{16})(-7)+(\irrep{45})(-4)+(\irrep{120})(2)+(\irrepbar{126})(2)+(\irrepbar{144})(5)+(\irrep{144})(-1)+(\irrep{210})(-4)+(\irrep{320})(2)+(\irrep{560})(-1)$\\
\irrep{2430} & = & $(\irrep{1})(0)+(\irrep{16})(3)+(\irrepbar{16})(-3)+(\irrep{45})(0)+(\irrep{126})(-6)+(\irrepbar{126})(6)+(\irrep{210})(0)+(\irrep{560})(3)+(\irrepbar{560})(-3)+(\irrep{770})(0)$\\
\irrep{2925} & = & $(\irrep{16})(3)+(\irrepbar{16})(-3)+2(\irrep{45})(0)+(\irrep{120})(6)+(\irrep{120})(-6)+(\irrep{144})(3)+(\irrepbar{144})(-3)+(\irrep{210})(0)+(\irrep{560})(3)+(\irrepbar{560})(-3)+(\irrep{945})(0)$\\
\irrep{3003} & = & $(\irrep{1})(-12)+(\irrep{10})(-6)+(\irrep{16})(-9)+(\irrep{54})(0)+(\irrepbar{126})(-6)+(\irrepbar{144})(-3)+(\irrep[1]{210})(6)+(\irrepbar{672})(-3)+(\irrep{720})(3)+(\irrepbar{1050})(0)$\\
\irrep{5824} & = & $(\irrep{10})(-6)+(\irrepbar{16})(-3)+(\irrep{16})(-9)+(\irrep{45})(0)+(\irrep{54})(0)+(\irrep{120})(-6)+(\irrepbar{126})(-6)+(\irrep{144})(3)+2(\irrepbar{144})(-3)+(\irrep{210})(0)+(\irrep{320})(6)+(\irrep{560})(3)+(\irrep{720})(3)+(\irrep{945})(0)+(\irrepbar{1050})(0)+(\irrepbar{1200})(-3)$\\
\irrep{7371} & = & $(\irrep{10})(2)+(\irrepbar{16})(5)+(\irrep{16})(-1)+(\irrep{45})(8)+(\irrep{45})(-4)+(\irrep{54})(-4)+2(\irrep{120})(2)+(\irrep{126})(2)+(\irrepbar{144})(5)+2(\irrep{144})(-1)+(\irrepbar{144})(-7)+(\irrep{210})(-4)+(\irrep{320})(2)+(\irrepbar{560})(5)+(\irrep{560})(-1)+(\irrep{720})(-1)+(\irrep{945})(-4)+(\irrep{1200})(-1)+(\irrep{1728})(2)$\\
\irrep{7722} & = & $(\irrep{1})(-4)+(\irrep{10})(2)+(\irrep{10})(-10)+(\irrep{16})(-1)+(\irrepbar{16})(-7)+(\irrep{45})(-4)+(\irrep{54})(8)+(\irrep{54})(-4)+(\irrepbar{126})(2)+(\irrepbar{144})(5)+(\irrep{144})(-1)+(\irrepbar{144})(-7)+(\irrep{210})(-4)+(\irrep[1]{210})(2)+(\irrep{320})(2)+(\irrep{560})(-1)+(\irrepbar{720})(5)+(\irrep{720})(-1)+(\irrepbar{1050})(-4)+(\irrep{1440})(-1)+(\irrep{1728})(2)$\\
\irrep{17550} & = & $(\irrep{10})(2)+(\irrepbar{16})(5)+(\irrep{16})(-1)+(\irrep{45})(-4)+2(\irrep{120})(2)+(\irrep{126})(2)+(\irrepbar{126})(2)+(\irrepbar{144})(5)+2(\irrep{144})(-1)+(\irrep{210})(8)+(\irrep{210})(-4)+(\irrep{320})(2)+(\irrepbar{560})(5)+2(\irrep{560})(-1)+(\irrepbar{560})(-7)+(\irrep{770})(-4)+(\irrep{945})(-4)+(\irrep{1050})(-4)+(\irrepbar{1200})(5)+(\irrep{1200})(-1)+(\irrep{1728})(2)+(\irrep{2970})(2)+(\irrep{3696})(-1)$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
\E6& $\to$ &SU(6)${\times}$SU(2)\\
\midrule
\irrep{27} & = & $(\irrep{6},\irrep{2})+(\irrepbar{15},\irrep{1})$\\
\irrep{78} & = & $(\irrep{1},\irrep{3})+(\irrep{20},\irrep{2})+(\irrep{35},\irrep{1})$\\
\irrep{351} & = & $(\irrep{6},\irrep{2})+(\irrepbar{15},\irrep{3})+(\irrepbar{21},\irrep{1})+(\irrep{84},\irrep{2})+(\irrepbar{105},\irrep{1})$\\
\irrep[1]{351} & = & $(\irrepbar{15},\irrep{1})+(\irrepbar{21},\irrep{3})+(\irrep{84},\irrep{2})+(\irrepbar[1]{105},\irrep{1})$\\
\irrep{650} & = & $(\irrep{1},\irrep{1})+(\irrep{20},\irrep{2})+(\irrep{35},\irrep{1})+(\irrep{35},\irrep{3})+(\irrep{70},\irrep{2})+(\irrepbar{70},\irrep{2})+(\irrep{189},\irrep{1})$\\
\irrep{1728} & = & $(\irrep{6},\irrep{2})+(\irrep{6},\irrep{4})+(\irrepbar{15},\irrep{1})+(\irrepbar{15},\irrep{3})+(\irrep{84},\irrep{2})+(\irrepbar{105},\irrep{1})+(\irrepbar{105},\irrep{3})+(\irrep{120},\irrep{2})+(\irrep{210},\irrep{2})+(\irrepbar{384},\irrep{1})$\\
\irrep{2430} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{5})+(\irrep{20},\irrep{2})+(\irrep{20},\irrep{4})+(\irrep{35},\irrep{3})+(\irrep{175},\irrep{3})+(\irrep{189},\irrep{1})+(\irrep{405},\irrep{1})+(\irrep{540},\irrep{2})$\\
\irrep{2925} & = & $(\irrep{1},\irrep{3})+(\irrep{20},\irrep{2})+(\irrep{20},\irrep{4})+(\irrep{35},\irrep{1})+(\irrep{35},\irrep{3})+(\irrep{70},\irrep{2})+(\irrepbar{70},\irrep{2})+(\irrep{175},\irrep{1})+(\irrep{189},\irrep{3})+(\irrep{280},\irrep{1})+(\irrepbar{280},\irrep{1})+(\irrep{540},\irrep{2})$\\
\irrep{3003} & = & $(\irrep{56},\irrep{4})+(\irrep{70},\irrep{2})+(\irrep{189},\irrep{1})+(\irrep{280},\irrep{3})+(\irrepbar{490},\irrep{1})+(\irrep{560},\irrep{2})$\\
\irrep{5824} & = & $(\irrep{20},\irrep{2})+(\irrep{35},\irrep{1})+(\irrep{35},\irrep{3})+(\irrep{56},\irrep{2})+(\irrep{70},\irrep{2})+(\irrepbar{70},\irrep{2})+(\irrep{70},\irrep{4})+(\irrep{189},\irrep{1})+(\irrep{189},\irrep{3})+(\irrep{280},\irrep{1})+(\irrep{280},\irrep{3})+(\irrep{540},\irrep{2})+(\irrep{560},\irrep{2})+(\irrepbar{896},\irrep{1})$\\
\irrep{7371} & = & $(\irrep{6},\irrep{2})+(\irrepbar{15},\irrep{1})+(\irrepbar{15},\irrep{3})+(\irrepbar{21},\irrep{1})+(\irrepbar{21},\irrep{3})+2(\irrep{84},\irrep{2})+(\irrep{84},\irrep{4})+(\irrepbar{105},\irrep{1})+(\irrepbar{105},\irrep{3})+(\irrepbar[1]{105},\irrep{3})+(\irrep{120},\irrep{2})+(\irrepbar[1]{210},\irrep{1})+(\irrep{210},\irrep{2})+(\irrep{336},\irrep{2})+(\irrepbar{384},\irrep{1})+(\irrepbar{384},\irrep{3})+(\irrep{840},\irrep{2})+(\irrepbar{1050},\irrep{1})$\\
\irrep{7722} & = & $(\irrep{6},\irrep{2})+(\irrepbar{15},\irrep{1})+(\irrep{84},\irrep{2})+(\irrepbar{105},\irrep{1})+(\irrepbar[1]{105},\irrep{1})+(\irrepbar{105},\irrep{3})+(\irrep{120},\irrep{2})+(\irrep{120},\irrep{4})+(\irrep{210},\irrep{2})+(\irrepbar[1]{210},\irrep{3})+(\irrepbar{384},\irrep{1})+(\irrepbar{384},\irrep{3})+(\irrep{420},\irrep{2})+(\irrep{840},\irrep{2})+(\irrepbar{1176},\irrep{1})$\\
\irrep{17550} & = & $(\irrep{6},\irrep{2})+(\irrep{6},\irrep{4})+(\irrepbar{15},\irrep{1})+(\irrepbar{15},\irrep{3})+(\irrepbar{15},\irrep{5})+(\irrepbar{21},\irrep{3})+2(\irrep{84},\irrep{2})+(\irrep{84},\irrep{4})+(\irrepbar{105},\irrep{1})+(\irrepbar[1]{105},\irrep{1})+2(\irrepbar{105},\irrep{3})+(\irrep{120},\irrep{2})+(\irrep{210},\irrep{2})+(\irrep{210},\irrep{4})+(\irrepbar{315},\irrep{1})+(\irrep{336},\irrep{2})+(\irrepbar{384},\irrep{1})+(\irrepbar{384},\irrep{3})+(\irrep{840},\irrep{2})+(\irrep[1]{840},\irrep{2})+(\irrepbar{1050},\irrep{1})+(\irrepbar{1050},\irrep{3})+(\irrep{1260},\irrep{2})+(\irrepbar{1701},\irrep{1})$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
\E6& $\to$ &SU(3)${\times}$SU(3)${\times}$SU(3)\\
\midrule
\irrep{27} & = & $(\irrep{3},\irrep{1},\irrep{3})+(\irrep{1},\irrep{3},\irrepbar{3})+(\irrepbar{3},\irrepbar{3},\irrep{1})$\\
\irrep{78} & = & $(\irrepbar{3},\irrep{3},\irrep{3})+(\irrep{3},\irrepbar{3},\irrepbar{3})+(\irrep{8},\irrep{1},\irrep{1})+(\irrep{1},\irrep{8},\irrep{1})+(\irrep{1},\irrep{1},\irrep{8})$\\
\irrep{351} & = & $(\irrep{3},\irrep{1},\irrep{3})+(\irrep{1},\irrep{3},\irrepbar{3})+(\irrepbar{3},\irrepbar{3},\irrep{1})+(\irrep{3},\irrep{1},\irrepbar{6})+(\irrepbar{6},\irrep{1},\irrep{3})+(\irrep{1},\irrep{3},\irrep{6})+(\irrep{1},\irrepbar{6},\irrepbar{3})+(\irrep{6},\irrepbar{3},\irrep{1})+(\irrepbar{3},\irrep{6},\irrep{1})+(\irrep{3},\irrep{8},\irrep{3})+(\irrep{8},\irrep{3},\irrepbar{3})+(\irrepbar{3},\irrepbar{3},\irrep{8})$\\
\irrep[1]{351} & = & $(\irrep{3},\irrep{1},\irrep{3})+(\irrep{1},\irrep{3},\irrepbar{3})+(\irrepbar{3},\irrepbar{3},\irrep{1})+(\irrepbar{6},\irrep{1},\irrepbar{6})+(\irrep{1},\irrepbar{6},\irrep{6})+(\irrep{6},\irrep{6},\irrep{1})+(\irrep{3},\irrep{8},\irrep{3})+(\irrep{8},\irrep{3},\irrepbar{3})+(\irrepbar{3},\irrepbar{3},\irrep{8})$\\
\irrep{650} & = & $2(\irrep{1},\irrep{1},\irrep{1})+2(\irrepbar{3},\irrep{3},\irrep{3})+2(\irrep{3},\irrepbar{3},\irrepbar{3})+(\irrep{8},\irrep{1},\irrep{1})+(\irrep{1},\irrep{8},\irrep{1})+(\irrep{1},\irrep{1},\irrep{8})+(\irrep{6},\irrep{3},\irrep{3})+(\irrepbar{3},\irrep{3},\irrepbar{6})+(\irrepbar{3},\irrepbar{6},\irrep{3})+(\irrep{3},\irrep{6},\irrepbar{3})+(\irrep{3},\irrepbar{3},\irrep{6})+(\irrepbar{6},\irrepbar{3},\irrepbar{3})+(\irrep{8},\irrep{8},\irrep{1})+(\irrep{8},\irrep{1},\irrep{8})+(\irrep{1},\irrep{8},\irrep{8})$\\
\irrep{1728} & = & $2(\irrep{3},\irrep{1},\irrep{3})+2(\irrep{1},\irrep{3},\irrepbar{3})+2(\irrepbar{3},\irrepbar{3},\irrep{1})+(\irrep{3},\irrep{1},\irrepbar{6})+(\irrepbar{6},\irrep{1},\irrep{3})+(\irrep{1},\irrep{3},\irrep{6})+(\irrep{1},\irrepbar{6},\irrepbar{3})+(\irrep{6},\irrepbar{3},\irrep{1})+(\irrepbar{3},\irrep{6},\irrep{1})+2(\irrep{3},\irrep{8},\irrep{3})+2(\irrep{8},\irrep{3},\irrepbar{3})+2(\irrepbar{3},\irrepbar{3},\irrep{8})+(\irrep{3},\irrep{8},\irrepbar{6})+(\irrepbar{6},\irrep{8},\irrep{3})+(\irrep{8},\irrep{3},\irrep{6})+(\irrep{8},\irrepbar{6},\irrepbar{3})+(\irrep{6},\irrepbar{3},\irrep{8})+(\irrepbar{3},\irrep{6},\irrep{8})+(\irrep{15},\irrep{1},\irrep{3})+(\irrep{3},\irrep{1},\irrep{15})+(\irrep{1},\irrep{15},\irrepbar{3})+(\irrep{1},\irrep{3},\irrepbar{15})+(\irrepbar{15},\irrepbar{3},\irrep{1})+(\irrepbar{3},\irrepbar{15},\irrep{1})$\\
\irrep{2430} & = & $(\irrep{1},\irrep{1},\irrep{1})+(\irrepbar{3},\irrep{3},\irrep{3})+(\irrep{3},\irrepbar{3},\irrepbar{3})+(\irrep{8},\irrep{1},\irrep{1})+(\irrep{1},\irrep{8},\irrep{1})+(\irrep{1},\irrep{1},\irrep{8})+(\irrep{6},\irrep{3},\irrep{3})+(\irrepbar{3},\irrep{3},\irrepbar{6})+(\irrepbar{3},\irrepbar{6},\irrep{3})+(\irrep{3},\irrep{6},\irrepbar{3})+(\irrep{3},\irrepbar{3},\irrep{6})+(\irrepbar{6},\irrepbar{3},\irrepbar{3})+(\irrep{8},\irrep{8},\irrep{1})+(\irrep{8},\irrep{1},\irrep{8})+(\irrep{1},\irrep{8},\irrep{8})+(\irrep{6},\irrepbar{6},\irrepbar{6})+(\irrepbar{6},\irrep{6},\irrep{6})+(\irrepbar{15},\irrep{3},\irrep{3})+(\irrepbar{3},\irrep{15},\irrep{3})+(\irrepbar{3},\irrep{3},\irrep{15})+(\irrep{15},\irrepbar{3},\irrepbar{3})+(\irrep{3},\irrepbar{15},\irrepbar{3})+(\irrep{3},\irrepbar{3},\irrepbar{15})+(\irrep{8},\irrep{8},\irrep{8})+(\irrep{27},\irrep{1},\irrep{1})+(\irrep{1},\irrep{27},\irrep{1})+(\irrep{1},\irrep{1},\irrep{27})$\\
\irrep{2925} & = & $(\irrep{1},\irrep{1},\irrep{1})+3(\irrepbar{3},\irrep{3},\irrep{3})+3(\irrep{3},\irrepbar{3},\irrepbar{3})+(\irrep{8},\irrep{1},\irrep{1})+(\irrep{1},\irrep{8},\irrep{1})+(\irrep{1},\irrep{1},\irrep{8})+(\irrep{6},\irrep{3},\irrep{3})+(\irrepbar{3},\irrep{3},\irrepbar{6})+(\irrepbar{3},\irrepbar{6},\irrep{3})+(\irrep{3},\irrep{6},\irrepbar{3})+(\irrep{3},\irrepbar{3},\irrep{6})+(\irrepbar{6},\irrepbar{3},\irrepbar{3})+(\irrep{10},\irrep{1},\irrep{1})+(\irrepbar{10},\irrep{1},\irrep{1})+(\irrep{1},\irrep{10},\irrep{1})+(\irrep{1},\irrepbar{10},\irrep{1})+(\irrep{1},\irrep{1},\irrep{10})+(\irrep{1},\irrep{1},\irrepbar{10})+(\irrep{6},\irrep{3},\irrepbar{6})+(\irrep{6},\irrepbar{6},\irrep{3})+(\irrepbar{3},\irrepbar{6},\irrepbar{6})+(\irrep{3},\irrep{6},\irrep{6})+(\irrepbar{6},\irrep{6},\irrepbar{3})+(\irrepbar{6},\irrepbar{3},\irrep{6})+2(\irrep{8},\irrep{8},\irrep{1})+2(\irrep{8},\irrep{1},\irrep{8})+2(\irrep{1},\irrep{8},\irrep{8})+(\irrepbar{15},\irrep{3},\irrep{3})+(\irrepbar{3},\irrep{15},\irrep{3})+(\irrepbar{3},\irrep{3},\irrep{15})+(\irrep{15},\irrepbar{3},\irrepbar{3})+(\irrep{3},\irrepbar{15},\irrepbar{3})+(\irrep{3},\irrepbar{3},\irrepbar{15})+(\irrep{8},\irrep{8},\irrep{8})$\\
\irrep{3003} & = & $(\irrep{1},\irrep{1},\irrep{1})+(\irrepbar{3},\irrep{3},\irrep{3})+(\irrep{3},\irrepbar{3},\irrepbar{3})+(\irrep{6},\irrep{3},\irrep{3})+(\irrepbar{3},\irrep{3},\irrepbar{6})+(\irrepbar{3},\irrepbar{6},\irrep{3})+(\irrep{3},\irrep{6},\irrepbar{3})+(\irrep{3},\irrepbar{3},\irrep{6})+(\irrepbar{6},\irrepbar{3},\irrepbar{3})+(\irrep{8},\irrep{8},\irrep{1})+(\irrep{8},\irrep{1},\irrep{8})+(\irrep{1},\irrep{8},\irrep{8})+(\irrep{10},\irrep{1},\irrep{10})+(\irrepbar{10},\irrepbar{10},\irrep{1})+(\irrep{1},\irrep{10},\irrepbar{10})+(\irrep{8},\irrep{8},\irrep{8})+(\irrep{6},\irrep{3},\irrep{15})+(\irrepbar{15},\irrepbar{6},\irrep{3})+(\irrepbar{3},\irrep{15},\irrepbar{6})+(\irrep{15},\irrepbar{3},\irrep{6})+(\irrep{3},\irrep{6},\irrepbar{15})+(\irrepbar{6},\irrepbar{15},\irrepbar{3})$\\
\irrep{5824} & = & $3(\irrepbar{3},\irrep{3},\irrep{3})+3(\irrep{3},\irrepbar{3},\irrepbar{3})+2(\irrep{8},\irrep{1},\irrep{1})+2(\irrep{1},\irrep{8},\irrep{1})+2(\irrep{1},\irrep{1},\irrep{8})+2(\irrep{6},\irrep{3},\irrep{3})+2(\irrepbar{3},\irrep{3},\irrepbar{6})+2(\irrepbar{3},\irrepbar{6},\irrep{3})+2(\irrep{3},\irrep{6},\irrepbar{3})+2(\irrep{3},\irrepbar{3},\irrep{6})+2(\irrepbar{6},\irrepbar{3},\irrepbar{3})+(\irrep{6},\irrep{3},\irrepbar{6})+(\irrep{6},\irrepbar{6},\irrep{3})+(\irrepbar{3},\irrepbar{6},\irrepbar{6})+(\irrep{3},\irrep{6},\irrep{6})+(\irrepbar{6},\irrep{6},\irrepbar{3})+(\irrepbar{6},\irrepbar{3},\irrep{6})+2(\irrep{8},\irrep{8},\irrep{1})+2(\irrep{8},\irrep{1},\irrep{8})+2(\irrep{1},\irrep{8},\irrep{8})+(\irrep{10},\irrep{1},\irrep{8})+(\irrep{8},\irrepbar{10},\irrep{1})+(\irrep{8},\irrep{1},\irrep{10})+(\irrepbar{10},\irrep{8},\irrep{1})+(\irrep{1},\irrep{10},\irrep{8})+(\irrep{1},\irrep{8},\irrepbar{10})+(\irrepbar{15},\irrep{3},\irrep{3})+(\irrepbar{3},\irrep{15},\irrep{3})+(\irrepbar{3},\irrep{3},\irrep{15})+(\irrep{15},\irrepbar{3},\irrepbar{3})+(\irrep{3},\irrepbar{15},\irrepbar{3})+(\irrep{3},\irrepbar{3},\irrepbar{15})+2(\irrep{8},\irrep{8},\irrep{8})+(\irrep{6},\irrep{3},\irrep{15})+(\irrepbar{15},\irrepbar{6},\irrep{3})+(\irrepbar{3},\irrep{15},\irrepbar{6})+(\irrep{15},\irrepbar{3},\irrep{6})+(\irrep{3},\irrep{6},\irrepbar{15})+(\irrepbar{6},\irrepbar{15},\irrepbar{3})$\\
\irrep{7371} & = & $3(\irrep{3},\irrep{1},\irrep{3})+3(\irrep{1},\irrep{3},\irrepbar{3})+3(\irrepbar{3},\irrepbar{3},\irrep{1})+2(\irrep{3},\irrep{1},\irrepbar{6})+2(\irrepbar{6},\irrep{1},\irrep{3})+2(\irrep{1},\irrep{3},\irrep{6})+2(\irrep{1},\irrepbar{6},\irrepbar{3})+2(\irrep{6},\irrepbar{3},\irrep{1})+2(\irrepbar{3},\irrep{6},\irrep{1})+(\irrepbar{6},\irrep{1},\irrepbar{6})+(\irrep{1},\irrepbar{6},\irrep{6})+(\irrep{6},\irrep{6},\irrep{1})+4(\irrep{3},\irrep{8},\irrep{3})+4(\irrep{8},\irrep{3},\irrepbar{3})+4(\irrepbar{3},\irrepbar{3},\irrep{8})+(\irrep{3},\irrep{10},\irrep{3})+(\irrep{3},\irrepbar{10},\irrep{3})+(\irrep{10},\irrep{3},\irrepbar{3})+(\irrepbar{10},\irrep{3},\irrepbar{3})+(\irrepbar{3},\irrepbar{3},\irrep{10})+(\irrepbar{3},\irrepbar{3},\irrepbar{10})+2(\irrep{3},\irrep{8},\irrepbar{6})+2(\irrepbar{6},\irrep{8},\irrep{3})+2(\irrep{8},\irrep{3},\irrep{6})+2(\irrep{8},\irrepbar{6},\irrepbar{3})+2(\irrep{6},\irrepbar{3},\irrep{8})+2(\irrepbar{3},\irrep{6},\irrep{8})+(\irrep{15},\irrep{1},\irrep{3})+(\irrep{3},\irrep{1},\irrep{15})+(\irrep{1},\irrep{15},\irrepbar{3})+(\irrep{1},\irrep{3},\irrepbar{15})+(\irrepbar{15},\irrepbar{3},\irrep{1})+(\irrepbar{3},\irrepbar{15},\irrep{1})+(\irrepbar{6},\irrep{8},\irrepbar{6})+(\irrep{8},\irrepbar{6},\irrep{6})+(\irrep{6},\irrep{6},\irrep{8})+(\irrep{15},\irrep{1},\irrepbar{6})+(\irrepbar{6},\irrep{1},\irrep{15})+(\irrep{1},\irrep{15},\irrep{6})+(\irrep{1},\irrepbar{6},\irrepbar{15})+(\irrep{6},\irrepbar{15},\irrep{1})+(\irrepbar{15},\irrep{6},\irrep{1})+(\irrep{15},\irrep{8},\irrep{3})+(\irrep{3},\irrep{8},\irrep{15})+(\irrep{8},\irrep{15},\irrepbar{3})+(\irrep{8},\irrep{3},\irrepbar{15})+(\irrepbar{15},\irrepbar{3},\irrep{8})+(\irrepbar{3},\irrepbar{15},\irrep{8})$\\
\irrep{7722} & = & $3(\irrep{3},\irrep{1},\irrep{3})+3(\irrep{1},\irrep{3},\irrepbar{3})+3(\irrepbar{3},\irrepbar{3},\irrep{1})+(\irrep{3},\irrep{1},\irrepbar{6})+(\irrepbar{6},\irrep{1},\irrep{3})+(\irrep{1},\irrep{3},\irrep{6})+(\irrep{1},\irrepbar{6},\irrepbar{3})+(\irrep{6},\irrepbar{3},\irrep{1})+(\irrepbar{3},\irrep{6},\irrep{1})+(\irrepbar{6},\irrep{1},\irrepbar{6})+(\irrep{1},\irrepbar{6},\irrep{6})+(\irrep{6},\irrep{6},\irrep{1})+3(\irrep{3},\irrep{8},\irrep{3})+3(\irrep{8},\irrep{3},\irrepbar{3})+3(\irrepbar{3},\irrepbar{3},\irrep{8})+2(\irrep{3},\irrep{8},\irrepbar{6})+2(\irrepbar{6},\irrep{8},\irrep{3})+2(\irrep{8},\irrep{3},\irrep{6})+2(\irrep{8},\irrepbar{6},\irrepbar{3})+2(\irrep{6},\irrepbar{3},\irrep{8})+2(\irrepbar{3},\irrep{6},\irrep{8})+(\irrep{15},\irrep{1},\irrep{3})+(\irrep{3},\irrep{10},\irrepbar{6})+(\irrep{3},\irrep{1},\irrep{15})+(\irrepbar{6},\irrepbar{10},\irrep{3})+(\irrep{10},\irrep{3},\irrep{6})+(\irrepbar{10},\irrepbar{6},\irrepbar{3})+(\irrep{1},\irrep{15},\irrepbar{3})+(\irrep{1},\irrep{3},\irrepbar{15})+(\irrep{6},\irrepbar{3},\irrep{10})+(\irrepbar{15},\irrepbar{3},\irrep{1})+(\irrepbar{3},\irrep{6},\irrepbar{10})+(\irrepbar{3},\irrepbar{15},\irrep{1})+(\irrepbar{6},\irrep{8},\irrepbar{6})+(\irrep{8},\irrepbar{6},\irrep{6})+(\irrep{6},\irrep{6},\irrep{8})+(\irrep{15},\irrep{8},\irrep{3})+(\irrep{3},\irrep{8},\irrep{15})+(\irrep{8},\irrep{15},\irrepbar{3})+(\irrep{8},\irrep{3},\irrepbar{15})+(\irrepbar{15},\irrepbar{3},\irrep{8})+(\irrepbar{3},\irrepbar{15},\irrep{8})+(\irrep{15},\irrep{1},\irrep{15})+(\irrep{1},\irrep{15},\irrepbar{15})+(\irrepbar{15},\irrepbar{15},\irrep{1})$\\
\irrep{17550} & = & $3(\irrep{3},\irrep{1},\irrep{3})+3(\irrep{1},\irrep{3},\irrepbar{3})+3(\irrepbar{3},\irrepbar{3},\irrep{1})+2(\irrep{3},\irrep{1},\irrepbar{6})+2(\irrepbar{6},\irrep{1},\irrep{3})+2(\irrep{1},\irrep{3},\irrep{6})+2(\irrep{1},\irrepbar{6},\irrepbar{3})+2(\irrep{6},\irrepbar{3},\irrep{1})+2(\irrepbar{3},\irrep{6},\irrep{1})+2(\irrepbar{6},\irrep{1},\irrepbar{6})+2(\irrep{1},\irrepbar{6},\irrep{6})+2(\irrep{6},\irrep{6},\irrep{1})+5(\irrep{3},\irrep{8},\irrep{3})+5(\irrep{8},\irrep{3},\irrepbar{3})+5(\irrepbar{3},\irrepbar{3},\irrep{8})+(\irrep{3},\irrep{10},\irrep{3})+(\irrep{3},\irrepbar{10},\irrep{3})+(\irrep{10},\irrep{3},\irrepbar{3})+(\irrepbar{10},\irrep{3},\irrepbar{3})+(\irrepbar{3},\irrepbar{3},\irrep{10})+(\irrepbar{3},\irrepbar{3},\irrepbar{10})+3(\irrep{3},\irrep{8},\irrepbar{6})+3(\irrepbar{6},\irrep{8},\irrep{3})+3(\irrep{8},\irrep{3},\irrep{6})+3(\irrep{8},\irrepbar{6},\irrepbar{3})+3(\irrep{6},\irrepbar{3},\irrep{8})+3(\irrepbar{3},\irrep{6},\irrep{8})+2(\irrep{15},\irrep{1},\irrep{3})+(\irrep{3},\irrepbar{10},\irrepbar{6})+2(\irrep{3},\irrep{1},\irrep{15})+(\irrepbar{6},\irrep{10},\irrep{3})+(\irrep{10},\irrepbar{6},\irrepbar{3})+(\irrepbar{10},\irrep{3},\irrep{6})+2(\irrep{1},\irrep{15},\irrepbar{3})+2(\irrep{1},\irrep{3},\irrepbar{15})+(\irrep{6},\irrepbar{3},\irrepbar{10})+2(\irrepbar{15},\irrepbar{3},\irrep{1})+(\irrepbar{3},\irrep{6},\irrep{10})+2(\irrepbar{3},\irrepbar{15},\irrep{1})+(\irrepbar{6},\irrep{8},\irrepbar{6})+(\irrep{8},\irrepbar{6},\irrep{6})+(\irrep{6},\irrep{6},\irrep{8})+(\irrep{15},\irrep{1},\irrepbar{6})+(\irrepbar{6},\irrep{1},\irrep{15})+(\irrep{1},\irrep{15},\irrep{6})+(\irrep{1},\irrepbar{6},\irrepbar{15})+(\irrep{6},\irrepbar{15},\irrep{1})+(\irrepbar{15},\irrep{6},\irrep{1})+2(\irrep{15},\irrep{8},\irrep{3})+2(\irrep{3},\irrep{8},\irrep{15})+2(\irrep{8},\irrep{15},\irrepbar{3})+2(\irrep{8},\irrep{3},\irrepbar{15})+2(\irrepbar{15},\irrepbar{3},\irrep{8})+2(\irrepbar{3},\irrepbar{15},\irrep{8})+(\irrep{
24},\irrep{1},\irrep{3})+(\irrep{3},\irrep{1},\irrep{24})+(\irrep{1},\irrep{24},\irrepbar{3})+(\irrep{1},\irrep{3},\irrepbar{24})+(\irrepbar{24},\irrepbar{3},\irrep{1})+(\irrepbar{3},\irrepbar{24},\irrep{1})+(\irrep{15},\irrep{8},\irrepbar{6})+(\irrepbar{6},\irrep{8},\irrep{15})+(\irrep{8},\irrep{15},\irrep{6})+(\irrep{8},\irrepbar{6},\irrepbar{15})+(\irrep{6},\irrepbar{15},\irrep{8})+(\irrepbar{15},\irrep{6},\irrep{8})+(\irrep{3},\irrep{27},\irrep{3})+(\irrep{27},\irrep{3},\irrepbar{3})+(\irrepbar{3},\irrepbar{3},\irrep{27})$\\
\bottomrule
\end{longtable}
\newpage
}
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:E7BranchingRules}\E7 Branching Rules}\\
\endfirsthead
\caption[]{\E7 Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
\E7& $\to$ &SU(8)\\
\midrule
\irrep{56} & = & $\irrep{28}+\irrepbar{28}$\\
\irrep{133} & = & $\irrep{63}+\irrep{70}$\\
\irrep{912} & = & $\irrep{36}+\irrepbar{36}+\irrep{420}+\irrepbar{420}$\\
\irrep{1463} & = & $\irrep{1}+\irrep{70}+\irrep{336}+\irrepbar{336}+\irrep{720}$\\
\irrep{1539} & = & $\irrep{63}+\irrep{378}+\irrepbar{378}+\irrep{720}$\\
\irrep{6480} & = & $\irrep{28}+\irrepbar{28}+\irrep{420}+\irrepbar{420}+\irrep{1280}+\irrepbar{1280}+\irrep{1512}+\irrepbar{1512}$\\
\irrep{7371} & = & $\irrep{1}+\irrep{70}+\irrep{720}+\irrep{1232}+\irrep{1764}+\irrep{3584}$\\
\irrep{8645} & = & $\irrep{63}+\irrep{378}+\irrepbar{378}+\irrep{945}+\irrepbar{945}+\irrep{2352}+\irrep{3584}$\\
\irrep{24320} & = & $\irrep{28}+\irrepbar{28}+\irrep{1512}+\irrepbar{1512}+\irrep[1]{2520}+\irrepbar[1]{2520}+\irrep{8100}+\irrepbar{8100}$\\
\irrep{27664} & = & $\irrep{36}+\irrepbar{36}+\irrep{420}+\irrepbar{420}+\irrep{1176}+\irrepbar{1176}+\irrep{1280}+\irrepbar{1280}+\irrep{2100}+\irrepbar{2100}+\irrep{8820}+\irrepbar{8820}$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
\E7& $\to$ &SO(12)${\times}$SU(2)\\
\midrule
\irrep{56} & = & $(\irrep{12},\irrep{2})+(\irrep{32},\irrep{1})$\\
\irrep{133} & = & $(\irrep{1},\irrep{3})+(\irrepbar{32},\irrep{2})+(\irrep{66},\irrep{1})$\\
\irrep{912} & = & $(\irrep{12},\irrep{2})+(\irrep{32},\irrep{3})+(\irrep{220},\irrep{2})+(\irrep{352},\irrep{1})$\\
\irrep{1463} & = & $(\irrep{66},\irrep{1})+(\irrep{77},\irrep{3})+(\irrepbar{352},\irrep{2})+(\irrep{462},\irrep{1})$\\
\irrep{1539} & = & $(\irrep{1},\irrep{1})+(\irrepbar{32},\irrep{2})+(\irrep{66},\irrep{3})+(\irrep{77},\irrep{1})+(\irrepbar{352},\irrep{2})+(\irrep{495},\irrep{1})$\\
\irrep{6480} & = & $(\irrep{12},\irrep{2})+(\irrep{12},\irrep{4})+(\irrep{32},\irrep{1})+(\irrep{32},\irrep{3})+(\irrep{220},\irrep{2})+(\irrep{352},\irrep{1})+(\irrep{352},\irrep{3})+(\irrep{560},\irrep{2})+(\irrep{792},\irrep{2})+(\irrep{1728},\irrep{1})$\\
\irrep{7371} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{5})+(\irrepbar{32},\irrep{2})+(\irrepbar{32},\irrep{4})+(\irrep{66},\irrep{3})+(\irrepbar{462},\irrep{3})+(\irrep{495},\irrep{1})+(\irrep{1638},\irrep{1})+(\irrepbar{1728},\irrep{2})$\\
\irrep{8645} & = & $(\irrep{1},\irrep{3})+(\irrepbar{32},\irrep{2})+(\irrepbar{32},\irrep{4})+(\irrep{66},\irrep{1})+(\irrep{66},\irrep{3})+(\irrepbar{352},\irrep{2})+(\irrepbar{462},\irrep{1})+(\irrep{495},\irrep{3})+(\irrepbar{1728},\irrep{2})+(\irrep{2079},\irrep{1})$\\
\irrep{24320} & = & $(\irrep[1]{352},\irrep{4})+(\irrep{560},\irrep{2})+(\irrep{1728},\irrep{1})+(\irrep{2112},\irrep{3})+(\irrep{4224},\irrep{1})+(\irrep{4752},\irrep{2})$\\
\irrep{27664} & = & $(\irrep{12},\irrep{2})+(\irrep{32},\irrep{1})+(\irrep{32},\irrep{3})+(\irrep{220},\irrep{2})+(\irrep{220},\irrep{4})+(\irrep{352},\irrep{1})+(\irrep{352},\irrep{3})+(\irrep{560},\irrep{2})+(\irrep{792},\irrep{2})+(\irrep{1728},\irrep{3})+(\irrep{2112},\irrep{1})+(\irrep[1]{4928},\irrep{1})+(\irrep{4928},\irrep{2})$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
\E7& $\to$ &SU(6)${\times}$SU(3)\\
\midrule
\irrep{56} & = & $(\irrep{6},\irrep{3})+(\irrepbar{6},\irrepbar{3})+(\irrep{20},\irrep{1})$\\
\irrep{133} & = & $(\irrep{1},\irrep{8})+(\irrep{15},\irrepbar{3})+(\irrepbar{15},\irrep{3})+(\irrep{35},\irrep{1})$\\
\irrep{912} & = & $(\irrep{6},\irrep{3})+(\irrepbar{6},\irrepbar{3})+(\irrep{6},\irrepbar{6})+(\irrepbar{6},\irrep{6})+(\irrep{20},\irrep{8})+(\irrep{70},\irrep{1})+(\irrepbar{70},\irrep{1})+(\irrep{84},\irrep{3})+(\irrepbar{84},\irrepbar{3})$\\
\irrep{1463} & = & $(\irrep{1},\irrep{1})+(\irrep{15},\irrepbar{3})+(\irrepbar{15},\irrep{3})+(\irrep{21},\irrep{6})+(\irrepbar{21},\irrepbar{6})+(\irrep{35},\irrep{1})+(\irrep{35},\irrep{8})+(\irrep{105},\irrepbar{3})+(\irrepbar{105},\irrep{3})+(\irrep{175},\irrep{1})$\\
\irrep{1539} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{8})+(\irrep{15},\irrepbar{3})+(\irrepbar{15},\irrep{3})+(\irrep{15},\irrep{6})+(\irrepbar{15},\irrepbar{6})+(\irrep{21},\irrepbar{3})+(\irrepbar{21},\irrep{3})+(\irrep{35},\irrep{1})+(\irrep{35},\irrep{8})+(\irrep{105},\irrepbar{3})+(\irrepbar{105},\irrep{3})+(\irrep{189},\irrep{1})$\\
\irrep{6480} & = & $2(\irrep{6},\irrep{3})+2(\irrepbar{6},\irrepbar{3})+(\irrep{6},\irrepbar{6})+(\irrepbar{6},\irrep{6})+2(\irrep{20},\irrep{1})+(\irrep{6},\irrep{15})+(\irrepbar{6},\irrepbar{15})+2(\irrep{20},\irrep{8})+(\irrep{70},\irrep{1})+(\irrepbar{70},\irrep{1})+(\irrep{70},\irrep{8})+(\irrepbar{70},\irrep{8})+2(\irrep{84},\irrep{3})+2(\irrepbar{84},\irrepbar{3})+(\irrep{84},\irrepbar{6})+(\irrepbar{84},\irrep{6})+(\irrep{120},\irrep{3})+(\irrepbar{120},\irrepbar{3})+(\irrep{210},\irrep{3})+(\irrepbar{210},\irrepbar{3})+(\irrep{540},\irrep{1})$\\
\irrep{7371} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{8})+(\irrep{15},\irrepbar{3})+(\irrepbar{15},\irrep{3})+(\irrep{15},\irrep{6})+(\irrepbar{15},\irrepbar{6})+(\irrep{1},\irrep{27})+(\irrep{15},\irrepbar{15})+(\irrepbar{15},\irrep{15})+(\irrep{35},\irrep{1})+(\irrep{35},\irrep{8})+(\irrep{105},\irrepbar{3})+(\irrepbar{105},\irrep{3})+(\irrep[1]{105},\irrep{6})+(\irrepbar[1]{105},\irrepbar{6})+(\irrep{189},\irrep{1})+(\irrep{189},\irrep{8})+(\irrep{384},\irrepbar{3})+(\irrepbar{384},\irrep{3})+(\irrep{405},\irrep{1})$\\
\irrep{8645} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{8})+(\irrep{1},\irrep{10})+(\irrep{1},\irrepbar{10})+2(\irrep{15},\irrepbar{3})+2(\irrepbar{15},\irrep{3})+(\irrep{15},\irrep{6})+(\irrepbar{15},\irrepbar{6})+(\irrep{21},\irrepbar{3})+(\irrepbar{21},\irrep{3})+(\irrep{15},\irrepbar{15})+(\irrepbar{15},\irrep{15})+(\irrep{35},\irrep{1})+2(\irrep{35},\irrep{8})+(\irrep{105},\irrepbar{3})+(\irrepbar{105},\irrep{3})+(\irrep[1]{105},\irrepbar{3})+(\irrepbar[1]{105},\irrep{3})+(\irrep{105},\irrep{6})+(\irrepbar{105},\irrepbar{6})+(\irrep{189},\irrep{1})+(\irrep{189},\irrep{8})+(\irrep{280},\irrep{1})+(\irrepbar{280},\irrep{1})+(\irrep{384},\irrepbar{3})+(\irrepbar{384},\irrep{3})$\\
\irrep{24320} & = & $(\irrep{6},\irrep{3})+(\irrepbar{6},\irrepbar{3})+2(\irrep{20},\irrep{1})+(\irrep{56},\irrep{10})+(\irrepbar{56},\irrepbar{10})+(\irrep{70},\irrep{8})+(\irrepbar{70},\irrep{8})+(\irrep{84},\irrep{3})+(\irrepbar{84},\irrepbar{3})+(\irrep{84},\irrepbar{6})+(\irrepbar{84},\irrep{6})+(\irrep{120},\irrep{3})+(\irrepbar{120},\irrepbar{3})+(\irrep{120},\irrep{15})+(\irrepbar{120},\irrepbar{15})+(\irrep{210},\irrep{3})+(\irrepbar{210},\irrepbar{3})+(\irrep{336},\irrepbar{6})+(\irrepbar{336},\irrep{6})+(\irrep{540},\irrep{1})+(\irrep{540},\irrep{8})+(\irrep[1]{840},\irrep{3})+(\irrepbar[1]{840},\irrepbar{3})+(\irrep{980},\irrep{1})$\\
\irrep{27664} & = & $2(\irrep{6},\irrep{3})+2(\irrepbar{6},\irrepbar{3})+2(\irrep{6},\irrepbar{6})+2(\irrepbar{6},\irrep{6})+2(\irrep{20},\irrep{1})+(\irrep{6},\irrep{15})+(\irrepbar{6},\irrepbar{15})+2(\irrep{20},\irrep{8})+(\irrep{20},\irrep{10})+(\irrep{20},\irrepbar{10})+(\irrep{56},\irrep{1})+(\irrepbar{56},\irrep{1})+(\irrep{70},\irrep{1})+(\irrepbar{70},\irrep{1})+2(\irrep{70},\irrep{8})+2(\irrepbar{70},\irrep{8})+3(\irrep{84},\irrep{3})+3(\irrepbar{84},\irrepbar{3})+(\irrep{84},\irrepbar{6})+(\irrepbar{84},\irrep{6})+(\irrep{84},\irrep{15})+(\irrepbar{84},\irrepbar{15})+(\irrep{120},\irrep{3})+(\irrepbar{120},\irrepbar{3})+(\irrep{120},\irrepbar{6})+(\irrepbar{120},\irrep{6})+(\irrep{210},\irrep{3})+(\irrepbar{210},\irrepbar{3})+(\irrep{210},\irrepbar{6})+(\irrepbar{210},\irrep{6})+(\irrep{336},\irrep{3})+(\irrepbar{336},\irrepbar{3})+(\irrep{540},\irrep{1})+(\irrep{540},\irrep{8})+(\irrep{560},\irrep{1})+(\irrepbar{560},\irrep{1})+(\irrep{840},\irrep{3})+(\irrepbar{840},\irrepbar{3})$\\
\bottomrule
\end{longtable}
\newpage
\begin{longtable}{rcp{0.9\textwidth}}
\caption{\label{tab:E8BranchingRules}\E8 Branching Rules}\\
\endfirsthead
\caption[]{\E8 Branching Rules (continued)}\\
\endhead
\endfoot
\toprule
\rowcolor{tableheadcolor}
\E8& $\to$ &SO(16)\\
\midrule
\irrep{248} & = & $\irrep{120}+\irrep{128}$\\
\irrep{3875} & = & $\irrep{135}+\irrep{1820}+\irrep{1920}$\\
\irrep{27000} & = & $\irrep{1}+\irrep{128}+\irrep{1820}+\irrep{5304}+\irrepbar{6435}+\irrep{13312}$\\
\irrep{30380} & = & $\irrep{120}+\irrep{1920}+\irrep{7020}+\irrep{8008}+\irrep{13312}$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
\E8& $\to$ &SU(9)\\
\midrule
\irrep{248} & = & $\irrep{80}+\irrep{84}+\irrepbar{84}$\\
\irrep{3875} & = & $\irrep{80}+\irrep{240}+\irrepbar{240}+\irrep{1050}+\irrepbar{1050}+\irrep{1215}$\\
\irrep{27000} & = & $\irrep{1}+\irrep{80}+\irrep{84}+\irrepbar{84}+\irrep{1050}+\irrepbar{1050}+\irrep{1215}+\irrep{1944}+\irrep{2520}+\irrepbar{2520}+\irrep{5346}+\irrepbar{5346}+\irrep{5760}$\\
\irrep{30380} & = & $\irrep{1}+\irrep{80}+\irrep{84}+\irrepbar{84}+\irrep{240}+\irrepbar{240}+\irrep{1050}+\irrepbar{1050}+\irrep{1215}+\irrep{1540}+\irrepbar{1540}+\irrep{3402}+\irrepbar{3402}+\irrep{5346}+\irrepbar{5346}+\irrep{5760}$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
\E8& $\to$ &\E7${\times}$SU(2)\\
\midrule
\irrep{248} & = & $(\irrep{1},\irrep{3})+(\irrep{56},\irrep{2})+(\irrep{133},\irrep{1})$\\
\irrep{3875} & = & $(\irrep{1},\irrep{1})+(\irrep{56},\irrep{2})+(\irrep{133},\irrep{3})+(\irrep{912},\irrep{2})+(\irrep{1539},\irrep{1})$\\
\irrep{27000} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{5})+(\irrep{56},\irrep{2})+(\irrep{56},\irrep{4})+(\irrep{133},\irrep{3})+(\irrep{1463},\irrep{3})+(\irrep{1539},\irrep{1})+(\irrep{6480},\irrep{2})+(\irrep{7371},\irrep{1})$\\
\irrep{30380} & = & $(\irrep{1},\irrep{3})+(\irrep{56},\irrep{2})+(\irrep{56},\irrep{4})+(\irrep{133},\irrep{1})+(\irrep{133},\irrep{3})+(\irrep{912},\irrep{2})+(\irrep{1463},\irrep{1})+(\irrep{1539},\irrep{3})+(\irrep{6480},\irrep{2})+(\irrep{8645},\irrep{1})$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
\E8& $\to$ &\E6${\times}$SU(3)\\
\midrule
\irrep{248} & = & $(\irrep{1},\irrep{8})+(\irrep{27},\irrep{3})+(\irrepbar{27},\irrepbar{3})+(\irrep{78},\irrep{1})$\\
\irrep{3875} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{8})+(\irrep{27},\irrep{3})+(\irrepbar{27},\irrepbar{3})+(\irrep{27},\irrepbar{6})+(\irrepbar{27},\irrep{6})+(\irrep{78},\irrep{8})+(\irrep{351},\irrep{3})+(\irrepbar{351},\irrepbar{3})+(\irrep{650},\irrep{1})$\\
\irrep{27000} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{8})+(\irrep{1},\irrep{27})+(\irrep{27},\irrep{3})+(\irrepbar{27},\irrepbar{3})+(\irrep{27},\irrepbar{6})+(\irrepbar{27},\irrep{6})+(\irrep{27},\irrep{15})+(\irrepbar{27},\irrepbar{15})+(\irrep{78},\irrep{1})+(\irrep{78},\irrep{8})+(\irrep{351},\irrep{3})+(\irrepbar{351},\irrepbar{3})+(\irrep[1]{351},\irrepbar{6})+(\irrepbar[1]{351},\irrep{6})+(\irrep{650},\irrep{1})+(\irrep{650},\irrep{8})+(\irrep{1728},\irrep{3})+(\irrepbar{1728},\irrepbar{3})+(\irrep{2430},\irrep{1})$\\
\irrep{30380} & = & $(\irrep{1},\irrep{1})+(\irrep{1},\irrep{8})+(\irrep{1},\irrep{10})+(\irrep{1},\irrepbar{10})+2(\irrep{27},\irrep{3})+2(\irrepbar{27},\irrepbar{3})+(\irrep{27},\irrepbar{6})+(\irrepbar{27},\irrep{6})+(\irrep{27},\irrep{15})+(\irrepbar{27},\irrepbar{15})+(\irrep{78},\irrep{1})+2(\irrep{78},\irrep{8})+(\irrep{351},\irrep{3})+(\irrepbar{351},\irrepbar{3})+(\irrep[1]{351},\irrep{3})+(\irrepbar[1]{351},\irrepbar{3})+(\irrep{351},\irrepbar{6})+(\irrepbar{351},\irrep{6})+(\irrep{650},\irrep{1})+(\irrep{650},\irrep{8})+(\irrep{1728},\irrep{3})+(\irrepbar{1728},\irrepbar{3})+(\irrep{2925},\irrep{1})$\\
\bottomrule
\rowcolor{white}\\[-\medskipamount]
\toprule
\rowcolor{tableheadcolor}
\E8& $\to$ &SU(5)${\times}$SU(5)\\
\midrule
\irrep{248} & = & $(\irrep{5},\irrep{10})+(\irrepbar{10},\irrep{5})+(\irrep{10},\irrepbar{5})+(\irrepbar{5},\irrepbar{10})+(\irrep{24},\irrep{1})+(\irrep{1},\irrep{24})$\\
\irrep{3875} & = & $(\irrep{1},\irrep{1})+(\irrep{5},\irrep{10})+(\irrepbar{10},\irrep{5})+(\irrep{10},\irrepbar{5})+(\irrepbar{5},\irrepbar{10})+(\irrep{5},\irrep{15})+(\irrepbar{15},\irrep{5})+(\irrep{15},\irrepbar{5})+(\irrepbar{5},\irrepbar{15})+(\irrep{24},\irrep{1})+(\irrep{1},\irrep{24})+(\irrep{5},\irrep{40})+(\irrepbar{40},\irrep{5})+(\irrep{40},\irrepbar{5})+(\irrepbar{5},\irrepbar{40})+(\irrep{24},\irrep{24})+(\irrep{45},\irrep{10})+(\irrepbar{10},\irrep{45})+(\irrep{10},\irrepbar{45})+(\irrepbar{45},\irrepbar{10})+(\irrep{75},\irrep{1})+(\irrep{1},\irrep{75})$\\
\irrep{27000} & = & $2(\irrep{1},\irrep{1})+2(\irrep{5},\irrep{10})+2(\irrepbar{10},\irrep{5})+2(\irrep{10},\irrepbar{5})+2(\irrepbar{5},\irrepbar{10})+(\irrep{5},\irrep{15})+(\irrepbar{15},\irrep{5})+(\irrep{15},\irrepbar{5})+(\irrepbar{5},\irrepbar{15})+2(\irrep{24},\irrep{1})+2(\irrep{1},\irrep{24})+(\irrep{5},\irrep{40})+(\irrepbar{40},\irrep{5})+(\irrep{40},\irrepbar{5})+(\irrepbar{5},\irrepbar{40})+2(\irrep{24},\irrep{24})+2(\irrep{45},\irrep{10})+2(\irrepbar{10},\irrep{45})+2(\irrep{10},\irrepbar{45})+2(\irrepbar{45},\irrepbar{10})+(\irrep{50},\irrep{15})+(\irrepbar{15},\irrep{50})+(\irrep{15},\irrepbar{50})+(\irrepbar{50},\irrepbar{15})+(\irrep{75},\irrep{1})+(\irrep{1},\irrep{75})+(\irrep{70},\irrep{10})+(\irrepbar{10},\irrep{70})+(\irrep{10},\irrepbar{70})+(\irrepbar{70},\irrepbar{10})+(\irrep{45},\irrep{40})+(\irrepbar{40},\irrep{45})+(\irrep{40},\irrepbar{45})+(\irrepbar{45},\irrepbar{40})+(\irrep{24},\irrep{75})+(\irrep{75},\irrep{24})+(\irrep{5},\irrep{175})+(\irrepbar{175},\irrep{5})+(\irrep{
175},\irrepbar{5})+(\irrepbar{5},\irrepbar{175})+(\irrep{200},\irrep{1})+(\irrep{1},\irrep{200})$\\
\irrep{30380} & = & $2(\irrep{1},\irrep{1})+3(\irrep{5},\irrep{10})+3(\irrepbar{10},\irrep{5})+3(\irrep{10},\irrepbar{5})+3(\irrepbar{5},\irrepbar{10})+(\irrep{5},\irrep{15})+(\irrepbar{15},\irrep{5})+(\irrep{15},\irrepbar{5})+(\irrepbar{5},\irrepbar{15})+2(\irrep{24},\irrep{1})+2(\irrep{1},\irrep{24})+2(\irrep{5},\irrep{40})+2(\irrepbar{40},\irrep{5})+2(\irrep{40},\irrepbar{5})+2(\irrepbar{5},\irrepbar{40})+3(\irrep{24},\irrep{24})+2(\irrep{45},\irrep{10})+2(\irrepbar{10},\irrep{45})+2(\irrep{10},\irrepbar{45})+2(\irrepbar{45},\irrepbar{10})+(\irrep{45},\irrep{15})+(\irrepbar{15},\irrep{45})+(\irrep{15},\irrepbar{45})+(\irrepbar{45},\irrepbar{15})+(\irrep{50},\irrep{10})+(\irrepbar{10},\irrep{50})+(\irrep{10},\irrepbar{50})+(\irrepbar{50},\irrepbar{10})+(\irrep{75},\irrep{1})+(\irrep{1},\irrep{75})+(\irrep{70},\irrep{10})+(\irrepbar{10},\irrep{70})+(\irrep{10},\irrepbar{70})+(\irrepbar{70},\irrepbar{10})+(\irrep{45},\irrep{40})+(\irrepbar{40},\irrep{45})+(\irrep{40},\irrepbar{45})+(\irrepbar{45},\irrepbar{
40})+(\irrep{24},\irrep{75})+(\irrep{75},\irrep{24})+(\irrep{126},\irrep{1})+(\irrepbar{126},\irrep{1})+(\irrep{1},\irrep{126})+(\irrep{1},\irrepbar{126})+(\irrep{5},\irrep{175})+(\irrepbar{175},\irrep{5})+(\irrep{175},\irrepbar{5})+(\irrepbar{5},\irrepbar{175})$\\
\bottomrule
\end{longtable}
\section{Conclusions and Outlook}
\label{ConclusionsAndOutlook}
We have programmed the Mathematica application LieART, which brings Lie algebra and representation theory related computations to Mathematica.
It provides functions for the decomposition of tensor products and branching rules of irreducible representations, which are of high interest
in particle physics, especially unified model building. LieART exploits the Weyl reflection group in most of its applications, making it fast
and memory efficient. The user interface focuses on usability, allowing one to enter irreducible representations by their dimensional name and
giving results in textbook style. We have reproduced and extended existing tabulated data on irreducible representations, their tensor products
and branching rules.
In future versions we plan to add more branching rules to LieART. Currently, only a selection of common branching rules used in the tables is implemented.
We consider the tables given in the appendix as dynamic: they are included in LieART as Mathematica notebooks and can easily be modified and extended by the user.
Tables for algebras of high rank and/or higher-dimensional irreducible representations require large amounts of CPU time and memory. Nevertheless, we
plan to extend the tables even further and make them available online in a standard format (pdf and/or html).
\section{Download and Installation}
\label{sec:DownloadAndInstallation}
\subsection{Download}
LieART is hosted by Hepforge, IPPP Durham. The LieART project home page is
\href{http://lieart.hepforge.org/}{\texttt{http://lieart.hepforge.org/}}
and the LieART Mathematica application can be downloaded as a tar.gz archive from
\href{http://www.hepforge.org/downloads/lieart/}{\texttt{http://www.hepforge.org/downloads/lieart/}}
\subsection{Automatic Installation (Mathematica 8 only)}
Start Mathematica and in the front end select the menu entry
\newcommand\nextstep{$\mathtt{\;\to\;}$}
\texttt{File}\nextstep\texttt{Install\ldots}
In the appearing dialog select \texttt{Application} as \texttt{Type of Item to
Install} and the archive file in the open file dialog from \texttt{Source}.
Choose whether you want to install LieART for an individual user or system wide. For
a system-wide installation you might be asked for the superuser password.
\subsection{Manual Installation}
The above procedure in Mathematica 7 only allows you to automatically install the
Mathematica package file (\texttt{LieART.m}) of LieART without the documentation. We therefore
suggest a manual installation of the LieART application in Mathematica 7 and
in Mathematica 8 if problems with the automatic installation occur.
Extract the archive to the subdirectory \texttt{AddOns/Applications} of the
directory to which \linebreak\texttt{\$UserBaseDirectory} is set for a user-only
installation. For a system-wide installation place it in the according
subdirectory of \texttt{\$InstallationDirectory}. Restart Mathematica to allow
it to integrate LieART's documentation in its help system.
\subsection{Documentation}
The documentation of LieART is integrated in Mathematica's help system. After
restarting Mathematica the following path leads to LieART's documentation:
\texttt{Help}\newline
\nextstep\texttt{Documentation Center}\newline
\hphantom{\nextstep}\nextstep\texttt{Add-Ons\:\&\:Packages} (on the bottom left)\newline
\hphantom{\nextstep\nextstep}\nextstep\texttt{LieART}, Button labeled ``\texttt{Documentation}''
(Alternatively, a search for ``LieART'' (with the correct case) in the Documentation Center leads to the same
page.) The displayed page serves as the documentation home of LieART and includes links
to the descriptions of its most important functions.
The documentation of LieART includes a \texttt{Quick Start Tutorial} for the impatient,
which can be found near the bottom of LieART's documentation home under the
section \texttt{Tutorials}.
Tables of representation properties, tensor products and branching rules
generated by LieART can be found in the section \texttt{Tables} at the bottom of
LieART's documentation home.
\subsection{\LaTeX\ Package}
LieART comes with a \LaTeX\ package that defines commands to display irreps, roots and weights properly.
The style file \texttt{lieart.sty} can be found in the subdirectory \texttt{latex/} of the LieART project tree.
Please copy it to a location where your \LaTeX\ installation can find it.
\section{Introduction}
Lie groups are a key ingredient in modern physics: while smaller Lie groups like
\SU2 and \SO{3,1} enter the quantum mechanics of elementary chemistry and
condensed matter physics, the full spectrum of Lie groups, i.e., the classical
groups \SU{N}, \SO{N} and \Sp{2N} and the exceptionals \E6, \E7, \E8, \F4 and
\G2, has appeared with varying degrees of frequency in particle physics.
Lie groups have many other applications, e.g., to the theoretical physics of
gravity, string theory, etc., as well as to engineering and
elsewhere. Here we will focus on the Lie algebras of the compact forms of Lie groups that
are most useful for particle physics. Most of the results are
easily extended to the non-compact forms.
Shortly after the standard model was completed, grand unified theories
were proposed, where the standard model gauge group
$\SU{3}_\text{C}{\times}\SU{2}_\text{L}{\times}\U{1}_\text{Y}$ is embedded in a
higher symmetry, typically \SU5 \cite{Georgi:1974sy}, \SO{10}
\cite{Fritzsch:1974nn} or \E6 \cite{Gursey:1975ki}, although other choices have
been tried. Major reviews appeared on the uses of Lie algebras \cite{Slansky,
McKay:99021}, including tables of irreducible representations (irreps) and their
invariants. There are also a number of useful textbooks that cover the topic
\cite{Georgi:1982jb, Ramond:2010zz, cahn1984semi}. While extensive tables
already exist for building GUT models, it has sometimes been necessary to go
beyond what is tabulated in the literature. Our purpose here is to give extended
tables that will satisfy most modern model building requirements, but also
provide the software that allows one to go further as the situation may require.
In describing the software we will incorporate a review of most of the necessary
group theory background. This includes root and weight systems, the associated
Weyl groups for all the classical and exceptional Lie algebras, orthogonal basis
systems, and Weyl group orbits, which are used in our method of calculating
tensor products and irrep decompositions.
The theory of Lie algebras is in a mature state and many algorithms have been
established to facilitate computations in representation theory. The
correspondence of irreps to Young tableaux, especially for \SU{N}'s, with
algorithms for decomposing tensor products and subalgebra decomposition, even allows
complex calculations involving high-dimensional irreps by hand. Lie algebra
related computations have been implemented
multiple times on the computer in many different programming languages. Popular programs with a
similar aim as LieART are \cite{LiE,Schur,Simplie}. However, at the time we started the project no such
implementation existed for Mathematica. (Meanwhile a package for computations in
finite-dimensional and affine Lie algebras has been published~\cite{Nazarov:2011mv},
which has a similar intention to our software.) Mathematica is a computer algebra
software by Wolfram Research, Inc. which is widely used especially among particle physicists.
Originally intended as an in-house solution for a computerized grand unified
model scan of \SU{N}'s in Mathematica \cite{Albright:2012zt}, we present here
the Mathematica application LieART (\underline{Lie} \underline{A}lgebras and
\underline{R}epresentation \underline{T}heory), that makes tensor products and
subalgebra branching of irreps of the classical and exceptional Lie algebras
available for this platform. LieART's code exploits the Weyl reflection group,
inherent in all simple Lie algebras, in many parts of the algorithms, which
makes computations fast and at the same time economical on memory. We also
focused on the usability of LieART with a particle physicist as user in mind:
Irreps can be entered by their dimensional name, a nomenclature that physicists
prefer over the more unique Dynkin label. LieART displays results in textbook
style used in most particle-physics publications, e.g., \irrepbar{10} for the
conjugated 10-dimensional irrep of \SU5 instead of the corresponding Dynkin
label \dynkin{0,0,1,0}. The Dynkin label is used internally, but can also be
used as input and output. LieART can also display results in terms of \LaTeX\
commands that are defined in a supplemental \LaTeX\ style file for the
inclusion of results in publications.
In Section~\ref{sec:DownloadAndInstallation} we give instructions for
downloading and installing LieART, as well as locating its documentation
integrated in Mathematica's help system. Section~\ref{sec:QuickStart} comprises
a quick-start tutorial for LieART, introducing the most important functions for
the most common tasks in an example-based fashion.
Section~\ref{sec:TheoryAndImplementation} presents a self-contained overview of
the Lie algebra theory used in LieART and gives notes on its implementation.
In Section~\ref{LaTeXPackage} we present a \LaTeX\ style file included in LieART
for displaying weights, roots and irreps properly. In
Section~\ref{ConclusionsAndOutlook} we conclude and give an outlook on future
versions. In the appendix we include an extensive collection of tables with properties
of irreps, tensor products and branching rules. These tables follow \cite{Slansky}
in selection and presentation style, but extend most of the results. We plan
to maintain and further extend our tables, which can be used directly as lookup
tables without the aid of LieART.
\subsubsection{\SU{N}}
\enlargethispage{10pt}
\begin{longtable}{lrrc}
\caption{SU(2) Irreps\label{tab:SU2Irreps}}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & \\
\rowcolor{tableheadcolor}label & (name) & (index) & Duality\\
\midrule
\endfirsthead
\caption[]{SU(2) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & \\
\rowcolor{tableheadcolor}label & (name) & (index) & Duality\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1} & \irrep{2} & 1 & 1\\
\dynkin{2} & \irrep{3} & 4 & 0\\
\dynkin{3} & \irrep{4} & 10 & 1\\
\dynkin{4} & \irrep{5} & 20 & 0\\
\dynkin{5} & \irrep{6} & 35 & 1\\
\dynkin{6} & \irrep{7} & 56 & 0\\
\dynkin{7} & \irrep{8} & 84 & 1\\
\dynkin{8} & \irrep{9} & 120 & 0\\
\dynkin{9} & \irrep{10} & 165 & 1\\
\dynkincomma{10} & \irrep{11} & 220 & 0\\
\dynkincomma{11} & \irrep{12} & 286 & 1\\
\dynkincomma{12} & \irrep{13} & 364 & 0\\
\dynkincomma{13} & \irrep{14} & 455 & 1\\
\dynkincomma{14} & \irrep{15} & 560 & 0\\
\dynkincomma{15} & \irrep{16} & 680 & 1\\
\dynkincomma{16} & \irrep{17} & 816 & 0\\
\dynkincomma{17} & \irrep{18} & 969 & 1\\
\dynkincomma{18} & \irrep{19} & 1140 & 0\\
\dynkincomma{19} & \irrep{20} & 1330 & 1\\
\dynkincomma{20} & \irrep{21} & 1540 & 0\\
\dynkincomma{21} & \irrep{22} & 1771 & 1\\
\dynkincomma{22} & \irrep{23} & 2024 & 0\\
\dynkincomma{23} & \irrep{24} & 2300 & 1\\
\dynkincomma{24} & \irrep{25} & 2600 & 0\\
\dynkincomma{25} & \irrep{26} & 2925 & 1\\
\dynkincomma{26} & \irrep{27} & 3276 & 0\\
\dynkincomma{27} & \irrep{28} & 3654 & 1\\
\dynkincomma{28} & \irrep{29} & 4060 & 0\\
\dynkincomma{29} & \irrep{30} & 4495 & 1\\
\dynkincomma{30} & \irrep{31} & 4960 & 0\\
\dynkincomma{31} & \irrep{32} & 5456 & 1\\
\dynkincomma{32} & \irrep{33} & 5984 & 0\\
\dynkincomma{33} & \irrep{34} & 6545 & 1\\
\dynkincomma{34} & \irrep{35} & 7140 & 0\\
\dynkincomma{35} & \irrep{36} & 7770 & 1\\
\dynkincomma{36} & \irrep{37} & 8436 & 0\\
\dynkincomma{37} & \irrep{38} & 9139 & 1\\
\dynkincomma{38} & \irrep{39} & 9880 & 0\\
\dynkincomma{39} & \irrep{40} & 10660 & 1\\
\dynkincomma{40} & \irrep{41} & 11480 & 0\\
\dynkincomma{41} & \irrep{42} & 12341 & 1\\
\dynkincomma{42} & \irrep{43} & 13244 & 0\\
\dynkincomma{43} & \irrep{44} & 14190 & 1\\
\dynkincomma{44} & \irrep{45} & 15180 & 0\\
\dynkincomma{45} & \irrep{46} & 16215 & 1\\
\dynkincomma{46} & \irrep{47} & 17296 & 0\\
\end{longtable}
\newpage
\begin{longtable}{lrrcc}
\caption{\label{tab:SU3Irreps}SU(3) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(2)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Triality & singlets\\
\midrule
\endfirsthead
\caption[]{SU(3) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(2)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Triality & singlets\\
\midrule
\endhead
\multicolumn{5}{l}{\footnotesize $^\ast$SU(2)${\times}$U(1) singlet.}
\endfoot
\bottomrule
\multicolumn{5}{l}{\footnotesize $^\ast$SU(2)${\times}$U(1) singlet.}
\endlastfoot
\dynkin{1, 0} & \irrep{3} & 1 & 1 & 1\\
\dynkin{2, 0} & \irrep{6} & 5 & 2 & 1\\
\dynkin{1, 1} & \irrep{8} & 6 & 0 & \starred{1}\\
\dynkin{3, 0} & \irrep{10} & 15 & 0 & 1\\
\dynkin{2, 1} & \irrep{15} & 20 & 1 & 1\\
\dynkin{4, 0} & \irrep[1]{15} & 35 & 1 & 1\\
\dynkin{0, 5} & \irrep{21} & 70 & 1 & 1\\
\dynkin{1, 3} & \irrep{24} & 50 & 1 & 1\\
\dynkin{2, 2} & \irrep{27} & 54 & 0 & \starred{1}\\
\dynkin{6, 0} & \irrep{28} & 126 & 0 & 1\\
\dynkin{4, 1} & \irrep{35} & 105 & 0 & 1\\
\dynkin{7, 0} & \irrep{36} & 210 & 1 & 1\\
\dynkin{3, 2} & \irrep{42} & 119 & 1 & 1\\
\dynkin{0, 8} & \irrep{45} & 330 & 1 & 1\\
\dynkin{5, 1} & \irrep{48} & 196 & 1 & 1\\
\dynkin{9, 0} & \irrep{55} & 495 & 0 & 1\\
\dynkin{2, 4} & \irrep{60} & 230 & 1 & 1\\
\dynkin{1, 6} & \irrep{63} & 336 & 1 & 1\\
\dynkin{3, 3} & \irrep{64} & 240 & 0 & \starred{1}\\
\dynkincomma{10, 0} & \irrep{66} & 715 & 1 & 1\\
\dynkincomma{0, 11} & \irrep{78} & 1001 & 1 & 1\\
\dynkin{7, 1} & \irrep{80} & 540 & 0 & 1\\
\dynkin{5, 2} & \irrep{81} & 405 & 0 & 1\\
\dynkin{4, 3} & \irrep{90} & 435 & 1 & 1\\
\dynkincomma{12, 0} & \irrep{91} & 1365 & 0 & 1\\
\dynkin{8, 1} & \irrep{99} & 825 & 1 & 1\\
\dynkin{6, 2} & \irrep{105} & 665 & 1 & 1\\
\dynkincomma{13, 0} & \irrep[1]{105} & 1820 & 1 & 1\\
\dynkin{3, 5} & \irrep{120} & 730 & 1 & 1\\
\dynkin{1, 9} & \irrep[1]{120} & 1210 & 1 & 1\\
\dynkincomma{0, 14} & \irrep[2]{120} & 2380 & 1 & 1\\
\dynkin{4, 4} & \irrep{125} & 750 & 0 & \starred{1}\\
\dynkin{2, 7} & \irrep{132} & 1034 & 1 & 1\\
\dynkincomma{15, 0} & \irrep{136} & 3060 & 0 & 1\\
\dynkincomma{10, 1} & \irrep{143} & 1716 & 0 & 1\\
\dynkincomma{16, 0} & \irrep{153} & 3876 & 1 & 1\\
\dynkin{6, 3} & \irrep{154} & 1155 & 0 & 1\\
\dynkin{8, 2} & \irrep{162} & 1539 & 0 & 1\\
\dynkin{5, 4} & \irrep{165} & 1210 & 1 & 1\\
\dynkincomma{11, 1} & \irrep{168} & 2366 & 1 & 1\\
\dynkincomma{0, 17} & \irrep{171} & 4845 & 1 & 1\\
\dynkincomma{18, 0} & \irrep{190} & 5985 & 0 & 1\\
\dynkin{7, 3} & \irrep{192} & 1744 & 1 & 1\\
\dynkin{9, 2} & \irrep{195} & 2210 & 1 & 1\\
\dynkincomma{1, 12} & \irrep[1]{195} & 3185 & 1 & 1\\
\dynkin{4, 6} & \irrep{210} & 1855 & 1 & 1\\
\dynkincomma{19, 0} & \irrep[1]{210} & 7315 & 1 & 1\\
\dynkin{5, 5} & \irrep{216} & 1890 & 0 & \starred{1}\\
\dynkincomma{13, 1} & \irrep{224} & 4200 & 0 & 1\\
\dynkincomma{2, 10} & \irrep{231} & 3080 & 1 & 1\\
\dynkincomma{0, 20} & \irrep[1]{231} & 8855 & 1 & 1\\
\dynkin{3, 8} & \irrep{234} & 2535 & 1 & 1\\
\dynkincomma{21, 0} & \irrep{253} & 10626 & 0 & 1\\
\dynkincomma{14, 1} & \irrep{255} & 5440 & 1 & 1\\
\dynkin{7, 4} & \irrep{260} & 2730 & 0 & 1\\
\dynkincomma{11, 2} & \irrep{270} & 4185 & 0 & 1\\
\dynkin{6, 5} & \irrep{273} & 2821 & 1 & 1\\
\dynkincomma{22, 0} & \irrep{276} & 12650 & 1 & 1\\
\dynkin{9, 3} & \irrep{280} & 3570 & 0 & 1\\
\dynkincomma{1, 15} & \irrep{288} & 6936 & 1 & 1\\
\dynkincomma{0, 23} & \irrep{300} & 14950 & 1 & 1\\
\dynkincomma{12, 2} & \irrep{312} & 5564 & 1 & 1\\
\dynkin{8, 4} & \irrep{315} & 3885 & 1 & 1\\
\dynkincomma{16, 1} & \irrep{323} & 8721 & 0 & 1\\
\dynkincomma{24, 0} & \irrep{325} & 17550 & 0 & 1\\
\dynkincomma{10, 3} & \irrep{330} & 4895 & 1 & 1\\
\dynkin{5, 7} & \irrep{336} & 4060 & 1 & 1\\
\dynkin{6, 6} & \irrep{343} & 4116 & 0 & \starred{1}\\
\dynkincomma{25, 0} & \irrep{351} & 20475 & 1 & 1\\
\dynkincomma{2, 13} & \irrep{357} & 7259 & 1 & 1\\
\dynkincomma{17, 1} & \irrep{360} & 10830 & 1 & 1\\
\dynkin{4, 9} & \irrep{375} & 5375 & 1 & 1\\
\dynkincomma{0, 26} & \irrep{378} & 23751 & 1 & 1\\
\dynkincomma{3, 11} & \irrep{384} & 6560 & 1 & 1\\
\dynkincomma{1, 18} & \irrep{399} & 13300 & 1 & 1\\
\dynkin{8, 5} & \irrep{405} & 5670 & 0 & 1\\
\dynkincomma{14, 2} & \irrep[1]{405} & 9315 & 0 & 1\\
\dynkincomma{27, 0} & \irrep{406} & 27405 & 0 & 1\\
\dynkin{7, 6} & \irrep{420} & 5810 & 1 & 1\\
\dynkincomma{28, 0} & \irrep{435} & 31465 & 1 & 1\\
\dynkincomma{10, 4} & \irrep{440} & 7260 & 0 & 1\\
\dynkincomma{19, 1} & \irrep[1]{440} & 16170 & 0 & 1\\
\dynkincomma{12, 3} & \irrep{442} & 8619 & 0 & 1\\
\dynkincomma{15, 2} & \irrep{456} & 11780 & 1 & 1\\
\dynkincomma{0, 29} & \irrep{465} & 35960 & 1 & 1\\
\dynkin{9, 5} & \irrep{480} & 7720 & 1 & 1\\
\dynkincomma{20, 1} & \irrep{483} & 19481 & 1 & 1\\
\dynkincomma{30, 0} & \irrep{496} & 40920 & 0 & 1\\
\dynkin{6, 8} & \irrep{504} & 7980 & 1 & 1\\
\dynkincomma{13, 3} & \irrep[1]{504} & 11130 & 1 & 1\\
\dynkincomma{11, 4} & \irrep{510} & 9605 & 1 & 1\\
\dynkincomma{2, 16} & \irrep[1]{510} & 14705 & 1 & 1\\
\dynkin{7, 7} & \irrep{512} & 8064 & 0 & \starred{1}\\
\end{longtable}
\newpage
\begin{longtable}{lrrcc}
\caption{\label{tab:SU4Irreps}SU(4) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(3)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Quadrality & singlets\\
\midrule
\endfirsthead
\caption[]{SU(4) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(3)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Quadrality & singlets\\
\midrule
\endhead
\multicolumn{5}{l}{\footnotesize $^\ast$SU(3)${\times}$U(1) singlet.}
\endfoot
\bottomrule
\multicolumn{5}{l}{\footnotesize $^\ast$SU(3)${\times}$U(1) singlet.}
\endlastfoot
\dynkin{1, 0, 0} & \irrep{4} & 1 & 1 & 1\\
\dynkin{0, 1, 0} & \irrep{6} & 2 & 2 & 0\\
\dynkin{2, 0, 0} & \irrep{10} & 6 & 2 & 1\\
\dynkin{1, 0, 1} & \irrep{15} & 8 & 0 & \starred{1}\\
\dynkin{0, 1, 1} & \irrep{20} & 13 & 1 & 0\\
\dynkin{0, 2, 0} & \irrep[1]{20} & 16 & 0 & 0\\
\dynkin{0, 0, 3} & \irrep[2]{20} & 21 & 1 & 1\\
\dynkin{4, 0, 0} & \irrep{35} & 56 & 0 & 1\\
\dynkin{2, 0, 1} & \irrep{36} & 33 & 1 & 1\\
\dynkin{2, 1, 0} & \irrep{45} & 48 & 0 & 0\\
\dynkin{0, 3, 0} & \irrep{50} & 70 & 2 & 0\\
\dynkin{5, 0, 0} & \irrep{56} & 126 & 1 & 1\\
\dynkin{1, 2, 0} & \irrep{60} & 71 & 1 & 0\\
\dynkin{1, 1, 1} & \irrep{64} & 64 & 2 & 0\\
\dynkin{3, 0, 1} & \irrep{70} & 98 & 2 & 1\\
\dynkin{2, 0, 2} & \irrep{84} & 112 & 0 & \starred{1}\\
\dynkin{3, 1, 0} & \irrep[1]{84} & 133 & 1 & 0\\
\dynkin{6, 0, 0} & \irrep[2]{84} & 252 & 2 & 1\\
\dynkin{0, 4, 0} & \irrep{105} & 224 & 0 & 0\\
\dynkin{1, 0, 4} & \irrep{120} & 238 & 1 & 1\\
\dynkin{0, 0, 7} & \irrep[1]{120} & 462 & 1 & 1\\
\dynkin{2, 2, 0} & \irrep{126} & 210 & 2 & 0\\
\dynkin{1, 1, 2} & \irrep{140} & 203 & 1 & 0\\
\dynkin{0, 3, 1} & \irrep[1]{140} & 259 & 1 & 0\\
\dynkin{4, 1, 0} & \irrep[2]{140} & 308 & 2 & 0\\
\dynkin{3, 0, 2} & \irrep{160} & 296 & 1 & 1\\
\dynkin{8, 0, 0} & \irrep{165} & 792 & 0 & 1\\
\dynkin{1, 2, 1} & \irrep{175} & 280 & 0 & 0\\
\dynkin{5, 0, 1} & \irrep{189} & 504 & 0 & 1\\
\dynkin{0, 5, 0} & \irrep{196} & 588 & 2 & 0\\
\dynkin{0, 1, 5} & \irrep{216} & 630 & 1 & 0\\
\dynkin{9, 0, 0} & \irrep{220} & 1287 & 1 & 1\\
\dynkin{0, 2, 3} & \irrep{224} & 504 & 1 & 0\\
\dynkin{3, 1, 1} & \irrep{256} & 512 & 0 & 0\\
\dynkin{4, 0, 2} & \irrep{270} & 666 & 2 & 1\\
\dynkin{2, 3, 0} & \irrep{280} & 672 & 0 & 0\\
\dynkin{1, 4, 0} & \irrep[1]{280} & 742 & 1 & 0\\
\dynkin{6, 0, 1} & \irrep[2]{280} & 966 & 1 & 1\\
\dynkincomma{10, 0, 0} & \irrep{286} & 2002 & 2 & 1\\
\dynkin{2, 1, 2} & \irrep{300} & 580 & 2 & 0\\
\dynkin{3, 0, 3} & \irrep[1]{300} & 720 & 0 & \starred{1}\\
\dynkin{6, 1, 0} & \irrep{315} & 1176 & 0 & 0\\
\dynkin{0, 6, 0} & \irrep{336} & 1344 & 0 & 0\\
\dynkin{2, 2, 1} & \irrep{360} & 762 & 1 & 0\\
\dynkin{4, 2, 0} & \irrep[1]{360} & 1056 & 0 & 0\\
\dynkincomma{0, 0, 11} & \irrep{364} & 3003 & 1 & 1\\
\dynkin{1, 3, 1} & \irrep{384} & 896 & 2 & 0\\
\dynkin{7, 0, 1} & \irrep{396} & 1716 & 2 & 1\\
\dynkin{4, 1, 1} & \irrep{420} & 1113 & 1 & 0\\
\dynkin{2, 0, 5} & \irrep[1]{420} & 1337 & 1 & 1\\
\dynkin{7, 1, 0} & \irrep{440} & 2046 & 1 & 0\\
\dynkincomma{12, 0, 0} & \irrep{455} & 4368 & 0 & 1\\
\dynkin{3, 3, 0} & \irrep{480} & 1464 & 1 & 0\\
\dynkin{4, 0, 3} & \irrep{500} & 1525 & 1 & 1\\
\dynkin{0, 5, 1} & \irrep{504} & 1806 & 1 & 0\\
\dynkin{2, 1, 3} & \irrep{540} & 1359 & 1 & 0\\
\dynkin{2, 4, 0} & \irrep[1]{540} & 1764 & 2 & 0\\
\dynkin{5, 2, 0} & \irrep[2]{540} & 2007 & 1 & 0\\
\dynkin{0, 7, 0} & \irrep[3]{540} & 2772 & 2 & 0\\
\dynkin{1, 0, 8} & \irrep[4]{540} & 2871 & 1 & 1\\
\dynkincomma{13, 0, 0} & \irrep{560} & 6188 & 1 & 1\\
\dynkin{8, 1, 0} & \irrep{594} & 3366 & 2 & 0\\
\dynkin{6, 0, 2} & \irrep{616} & 2464 & 0 & 1\\
\dynkin{3, 2, 1} & \irrep{630} & 1722 & 2 & 0\\
\dynkin{5, 1, 1} & \irrep{640} & 2176 & 2 & 0\\
\dynkincomma{14, 0, 0} & \irrep{680} & 8568 & 2 & 1\\
\dynkin{9, 0, 1} & \irrep{715} & 4576 & 0 & 1\\
\dynkin{2, 2, 2} & \irrep{729} & 1944 & 0 & 0\\
\dynkin{1, 4, 1} & \irrep{735} & 2352 & 0 & 0\\
\dynkin{4, 3, 0} & \irrep{750} & 2850 & 2 & 0\\
\dynkin{1, 3, 2} & \irrep{756} & 2205 & 1 & 0\\
\dynkin{5, 0, 3} & \irrep{770} & 2926 & 2 & 1\\
\dynkin{6, 2, 0} & \irrep[1]{770} & 3542 & 2 & 0\\
\dynkin{0, 1, 9} & \irrep{780} & 5291 & 1 & 0\\
\dynkincomma{0, 0, 15} & \irrep{816} & 11628 & 1 & 1\\
\dynkin{4, 0, 4} & \irrep{825} & 3080 & 0 & \starred{1}\\
\dynkin{0, 8, 0} & \irrep[1]{825} & 5280 & 0 & 0\\
\dynkin{1, 6, 0} & \irrep{840} & 3906 & 1 & 0\\
\dynkin{7, 0, 2} & \irrep{864} & 4248 & 1 & 1\\
\dynkin{4, 1, 2} & \irrep{875} & 2800 & 0 & 0\\
\dynkin{0, 4, 3} & \irrep{900} & 3585 & 1 & 0\\
\dynkin{1, 1, 6} & \irrep{924} & 3927 & 1 & 0\\
\dynkincomma{10, 0, 1} & \irrep[1]{924} & 7007 & 1 & 1\\
\dynkin{2, 5, 0} & \irrep{945} & 4032 & 0 & 0\\
\dynkin{3, 1, 3} & \irrep{960} & 3008 & 2 & 0\\
\dynkincomma{16, 0, 0} & \irrep{969} & 15504 & 0 & 1\\
\dynkin{1, 2, 4} & \irrep{1000} & 3450 & 1 & 0\\
\dynkincomma{10, 1, 0} & \irrep{1001} & 8008 & 0 & 0\\
\dynkin{0, 2, 7} & \irrep{1056} & 5896 & 1 & 0\\
\dynkin{0, 3, 5} & \irrep{1100} & 5115 & 1 & 0\\
\dynkin{3, 0, 6} & \irrep{1120} & 5208 & 1 & 1\\
\dynkincomma{17, 0, 0} & \irrep{1140} & 20349 & 1 & 1\\
\dynkin{8, 0, 2} & \irrep{1170} & 6942 & 2 & 1\\
\dynkincomma{11, 0, 1} & \irrep[1]{1170} & 10374 & 2 & 1\\
\end{longtable}
\newpage
\begin{longtable}{lrrccc}
\caption{\label{tab:SU5Irreps}SU(5) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(4) & SU(3)${\times}$SU(2)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Quintality & singlets & singlets\\
\midrule
\endfirsthead
\caption[]{SU(5) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(4) & SU(3)${\times}$SU(2)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Quintality & singlets & singlets\\
\midrule
\endhead
\multicolumn{6}{l}{\footnotesize $^\ast$SU(4)${\times}$U(1) and SU(3)${\times}$SU(2)${\times}$U(1) singlets, respectively.}
\endfoot
\bottomrule
\multicolumn{6}{l}{\footnotesize $^\ast$SU(4)${\times}$U(1) and SU(3)${\times}$SU(2)${\times}$U(1) singlets, respectively.}
\endlastfoot
\dynkin{1, 0, 0, 0} & \irrep{5} & 1 & 1 & 1 & 0\\
\dynkin{0, 1, 0, 0} & \irrep{10} & 3 & 2 & 0 & 1\\
\dynkin{2, 0, 0, 0} & \irrep{15} & 7 & 2 & 1 & 0\\
\dynkin{1, 0, 0, 1} & \irrep{24} & 10 & 0 & \starred{1} & \starred{1}\\
\dynkin{0, 0, 0, 3} & \irrep{35} & 28 & 2 & 1 & 0\\
\dynkin{0, 0, 1, 1} & \irrep{40} & 22 & 2 & 0 & 0\\
\dynkin{0, 1, 0, 1} & \irrep{45} & 24 & 1 & 0 & 0\\
\dynkin{0, 0, 2, 0} & \irrep{50} & 35 & 1 & 0 & 1\\
\dynkin{2, 0, 0, 1} & \irrep{70} & 49 & 1 & 1 & 0\\
\dynkin{0, 0, 0, 4} & \irrep[1]{70} & 84 & 1 & 1 & 0\\
\dynkin{0, 1, 1, 0} & \irrep{75} & 50 & 0 & 0 & \starred{1}\\
\dynkin{0, 0, 1, 2} & \irrep{105} & 91 & 1 & 0 & 0\\
\dynkin{2, 0, 1, 0} & \irrep{126} & 105 & 0 & 0 & 0\\
\dynkin{5, 0, 0, 0} & \irrep[1]{126} & 210 & 0 & 1 & 0\\
\dynkin{3, 0, 0, 1} & \irrep{160} & 168 & 2 & 1 & 0\\
\dynkin{1, 1, 0, 1} & \irrep{175} & 140 & 2 & 0 & 1\\
\dynkin{1, 2, 0, 0} & \irrep[1]{175} & 175 & 0 & 0 & 0\\
\dynkin{0, 3, 0, 0} & \irrep[2]{175} & 210 & 1 & 0 & 1\\
\dynkin{2, 0, 0, 2} & \irrep{200} & 200 & 0 & \starred{1} & \starred{1}\\
\dynkin{1, 0, 2, 0} & \irrep{210} & 203 & 2 & 0 & 0\\
\dynkin{6, 0, 0, 0} & \irrep[1]{210} & 462 & 1 & 1 & 0\\
\dynkin{3, 1, 0, 0} & \irrep{224} & 280 & 0 & 0 & 0\\
\dynkin{1, 1, 1, 0} & \irrep{280} & 266 & 1 & 0 & 0\\
\dynkin{3, 0, 1, 0} & \irrep[1]{280} & 336 & 1 & 0 & 0\\
\dynkin{0, 2, 1, 0} & \irrep{315} & 357 & 2 & 0 & 1\\
\dynkin{1, 0, 0, 4} & \irrep[1]{315} & 462 & 2 & 1 & 0\\
\dynkin{7, 0, 0, 0} & \irrep{330} & 924 & 2 & 1 & 0\\
\dynkin{2, 2, 0, 0} & \irrep{420} & 574 & 1 & 0 & 0\\
\dynkin{4, 1, 0, 0} & \irrep[1]{420} & 714 & 1 & 0 & 0\\
\dynkin{1, 0, 1, 2} & \irrep{450} & 510 & 2 & 0 & 0\\
\dynkin{3, 0, 0, 2} & \irrep[1]{450} & 615 & 1 & 1 & 0\\
\dynkin{1, 1, 0, 2} & \irrep{480} & 536 & 1 & 0 & 0\\
\dynkin{0, 0, 4, 0} & \irrep{490} & 882 & 2 & 0 & 1\\
\dynkin{0, 0, 0, 8} & \irrep{495} & 1716 & 2 & 1 & 0\\
\dynkin{4, 0, 1, 0} & \irrep{540} & 882 & 2 & 0 & 0\\
\dynkin{0, 2, 0, 2} & \irrep{560} & 728 & 2 & 0 & 0\\
\dynkin{1, 3, 0, 0} & \irrep[1]{560} & 868 & 2 & 0 & 0\\
\dynkin{1, 0, 0, 5} & \irrep[2]{560} & 1092 & 1 & 1 & 0\\
\dynkin{2, 1, 1, 0} & \irrep{700} & 910 & 2 & 0 & 0\\
\dynkin{1, 0, 3, 0} & \irrep[1]{700} & 1050 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 9} & \irrep{715} & 3003 & 1 & 1 & 0\\
\dynkin{1, 0, 2, 1} & \irrep{720} & 924 & 1 & 0 & 1\\
\dynkin{5, 1, 0, 0} & \irrep[1]{720} & 1596 & 2 & 0 & 0\\
\dynkin{3, 2, 0, 0} & \irrep{840} & 1512 & 2 & 0 & 0\\
\dynkin{4, 0, 0, 2} & \irrep{875} & 1575 & 2 & 1 & 0\\
\dynkin{6, 0, 0, 1} & \irrep{924} & 2310 & 0 & 1 & 0\\
\dynkin{1, 0, 1, 3} & \irrep{945} & 1449 & 1 & 0 & 0\\
\dynkin{0, 1, 0, 5} & \irrep[1]{945} & 2016 & 2 & 0 & 0\\
\dynkin{0, 1, 3, 0} & \irrep{980} & 1666 & 1 & 0 & 1\\
\dynkin{3, 0, 0, 3} & \irrep{1000} & 1750 & 0 & \starred{1} & \starred{1}\\
\dynkincomma{10, 0, 0, 0} & \irrep{1001} & 5005 & 0 & 1 & 0\\
\dynkin{1, 1, 1, 1} & \irrep{1024} & 1280 & 0 & 0 & \starred{1}\\
\dynkin{0, 1, 2, 1} & \irrep{1050} & 1540 & 2 & 0 & 0\\
\dynkin{3, 0, 1, 1} & \irrep[1]{1050} & 1575 & 0 & 0 & 0\\
\dynkin{0, 2, 1, 1} & \irrep{1120} & 1624 & 1 & 0 & 0\\
\dynkin{0, 0, 1, 6} & \irrep{1155} & 3234 & 2 & 0 & 0\\
\dynkin{0, 2, 2, 0} & \irrep{1176} & 1960 & 0 & 0 & \starred{1}\\
\dynkin{0, 5, 0, 0} & \irrep[1]{1176} & 2940 & 0 & 0 & 1\\
\dynkin{0, 2, 0, 3} & \irrep{1200} & 2040 & 1 & 0 & 0\\
\dynkin{2, 1, 0, 2} & \irrep{1215} & 1782 & 2 & 0 & 1\\
\dynkin{0, 0, 3, 2} & \irrep{1260} & 2478 & 2 & 0 & 0\\
\dynkincomma{11, 0, 0, 0} & \irrep{1365} & 8008 & 1 & 1 & 0\\
\dynkin{0, 1, 1, 3} & \irrep{1440} & 2472 & 2 & 0 & 0\\
\dynkin{7, 0, 0, 1} & \irrep[1]{1440} & 4488 & 1 & 1 & 0\\
\dynkin{0, 0, 4, 1} & \irrep{1470} & 3234 & 1 & 0 & 0\\
\dynkin{0, 0, 2, 4} & \irrep{1500} & 3450 & 2 & 0 & 0\\
\dynkin{2, 0, 0, 5} & \irrep{1540} & 3542 & 2 & 1 & 0\\
\dynkin{0, 1, 0, 6} & \irrep[1]{1540} & 4158 & 1 & 0 & 0\\
\dynkin{2, 2, 0, 1} & \irrep{1701} & 2835 & 0 & 0 & 0\\
\dynkin{4, 1, 0, 1} & \irrep{1750} & 3500 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 7} & \irrep{1760} & 6072 & 1 & 0 & 0\\
\dynkin{2, 0, 3, 0} & \irrep{1800} & 3360 & 1 & 0 & 0\\
\dynkincomma{12, 0, 0, 0} & \irrep{1820} & 12376 & 2 & 1 & 0\\
\dynkin{2, 0, 2, 1} & \irrep{1890} & 3087 & 2 & 0 & 0\\
\dynkin{0, 4, 0, 1} & \irrep[1]{1890} & 4032 & 2 & 0 & 0\\
\dynkin{4, 0, 0, 3} & \irrep{1925} & 4235 & 1 & 1 & 0\\
\dynkin{4, 0, 1, 1} & \irrep{2000} & 3900 & 1 & 0 & 0\\
\dynkin{8, 0, 0, 1} & \irrep{2145} & 8151 & 2 & 1 & 0\\
\dynkin{1, 3, 0, 1} & \irrep{2205} & 4116 & 1 & 0 & 1\\
\dynkin{4, 0, 2, 0} & \irrep{2250} & 4875 & 0 & 0 & 0\\
\dynkin{7, 0, 1, 0} & \irrep{2376} & 7920 & 0 & 0 & 0\\
\dynkincomma{0, 0, 0, 13} & \irrep{2380} & 18564 & 2 & 1 & 0\\
\dynkin{0, 0, 3, 3} & \irrep{2400} & 5880 & 1 & 0 & 0\\
\dynkin{0, 1, 2, 2} & \irrep{2430} & 4536 & 1 & 0 & 0\\
\dynkin{0, 0, 2, 5} & \irrep{2475} & 7095 & 1 & 0 & 0\\
\dynkin{2, 1, 1, 1} & \irrep{2520} & 4074 & 1 & 0 & 0\\
\dynkin{2, 0, 1, 3} & \irrep[1]{2520} & 4746 & 2 & 0 & 0\\
\dynkin{0, 4, 1, 0} & \irrep[2]{2520} & 5964 & 1 & 0 & 1\\
\dynkin{2, 0, 0, 6} & \irrep[3]{2520} & 7224 & 1 & 1 & 0\\
\dynkin{0, 6, 0, 0} & \irrep[4]{2520} & 8316 & 2 & 0 & 1\\
\dynkin{8, 1, 0, 0} & \irrep{2574} & 10725 & 0 & 0 & 0\\
\dynkin{2, 1, 0, 3} & \irrep{2625} & 4900 & 1 & 0 & 0\\
\dynkin{0, 1, 1, 4} & \irrep[1]{2625} & 5775 & 1 & 0 & 0\\
\dynkin{2, 1, 2, 0} & \irrep{2700} & 4950 & 0 & 0 & 0\\
\dynkin{5, 1, 0, 1} & \irrep{2970} & 7524 & 1 & 0 & 0\\
\end{longtable}
\newpage
\begin{longtable}{lrrcccc}
\caption{\label{tab:SU6Irreps}SU(6) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(5) & SU(4)${\times}$SU(2) & SU(3)${\times}$SU(3)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Sextality & singlets & singlets & singlets\\
\midrule
\endfirsthead
\caption[]{SU(6) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(5) & SU(4)${\times}$SU(2) & SU(3)${\times}$SU(3)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Sextality & singlets & singlets & singlets\\
\midrule
\endhead
\multicolumn{7}{l}{\footnotesize $^\ast$SU(5)${\times}$U(1), SU(4)${\times}$SU(2)${\times}$U(1), and SU(3)${\times}$SU(3)${\times}$U(1) singlets, respectively.}
\endfoot
\bottomrule
\multicolumn{7}{l}{\footnotesize $^\ast$SU(5)${\times}$U(1), SU(4)${\times}$SU(2)${\times}$U(1), and SU(3)${\times}$SU(3)${\times}$U(1) singlets, respectively.}
\endlastfoot
\dynkin{1, 0, 0, 0, 0} & \irrep{6} & 1 & 1 & 1 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0} & \irrep{15} & 4 & 2 & 0 & 1 & 0\\
\dynkin{0, 0, 1, 0, 0} & \irrep{20} & 6 & 3 & 0 & 0 & 2\\
\dynkin{2, 0, 0, 0, 0} & \irrep{21} & 8 & 2 & 1 & 0 & 0\\
\dynkin{1, 0, 0, 0, 1} & \irrep{35} & 12 & 0 & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{3, 0, 0, 0, 0} & \irrep{56} & 36 & 3 & 1 & 0 & 0\\
\dynkin{1, 1, 0, 0, 0} & \irrep{70} & 33 & 3 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 1} & \irrep{84} & 38 & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 0, 1} & \irrep{105} & 52 & 2 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 2, 0} & \irrep[1]{105} & 64 & 2 & 0 & 1 & 0\\
\dynkin{2, 0, 0, 0, 1} & \irrep{120} & 68 & 1 & 1 & 0 & 0\\
\dynkin{0, 0, 0, 0, 4} & \irrep{126} & 120 & 2 & 1 & 0 & 0\\
\dynkin{0, 0, 2, 0, 0} & \irrep{175} & 120 & 0 & 0 & 0 & 2+\starred{1}\\
\dynkin{0, 1, 0, 1, 0} & \irrep{189} & 108 & 0 & 0 & \starred{1} & \starred{1}\\
\dynkin{0, 0, 1, 1, 0} & \irrep{210} & 131 & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 1, 2} & \irrep[1]{210} & 152 & 2 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 5} & \irrep{252} & 330 & 1 & 1 & 0 & 0\\
\dynkin{2, 0, 0, 1, 0} & \irrep{280} & 192 & 0 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 0, 1} & \irrep{315} & 264 & 2 & 1 & 0 & 0\\
\dynkin{0, 0, 1, 0, 2} & \irrep{336} & 248 & 1 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 0, 1} & \irrep{384} & 256 & 2 & 0 & 1 & 0\\
\dynkin{2, 0, 0, 0, 2} & \irrep{405} & 324 & 0 & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{0, 0, 0, 2, 1} & \irrep{420} & 358 & 1 & 0 & 0 & 0\\
\dynkin{6, 0, 0, 0, 0} & \irrep{462} & 792 & 0 & 1 & 0 & 0\\
\dynkin{0, 3, 0, 0, 0} & \irrep{490} & 504 & 0 & 0 & 1 & 0\\
\dynkin{0, 0, 0, 1, 3} & \irrep{504} & 516 & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 1, 0, 1} & \irrep{540} & 378 & 3 & 0 & 0 & 2\\
\dynkin{1, 0, 0, 2, 0} & \irrep{560} & 456 & 3 & 0 & 0 & 0\\
\dynkin{4, 0, 0, 0, 1} & \irrep{700} & 810 & 3 & 1 & 0 & 0\\
\dynkin{3, 0, 0, 1, 0} & \irrep{720} & 696 & 1 & 0 & 0 & 0\\
\dynkin{7, 0, 0, 0, 0} & \irrep{792} & 1716 & 1 & 1 & 0 & 0\\
\dynkin{1, 1, 0, 1, 0} & \irrep{840} & 668 & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 2, 0, 0} & \irrep[1]{840} & 764 & 1 & 0 & 0 & 0\\
\dynkin{3, 0, 1, 0, 0} & \irrep[2]{840} & 864 & 0 & 0 & 0 & 0\\
\dynkin{1, 1, 1, 0, 0} & \irrep{896} & 768 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 3, 0, 0} & \irrep{980} & 1134 & 3 & 0 & 0 & 4\\
\dynkin{1, 0, 1, 1, 0} & \irrep{1050} & 880 & 2 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 0, 2} & \irrep[1]{1050} & 1135 & 1 & 1 & 0 & 0\\
\dynkin{4, 1, 0, 0, 0} & \irrep[2]{1050} & 1440 & 0 & 0 & 0 & 0\\
\dynkin{2, 1, 0, 0, 1} & \irrep{1134} & 1053 & 3 & 0 & 0 & 0\\
\dynkin{2, 2, 0, 0, 0} & \irrep[1]{1134} & 1296 & 0 & 0 & 0 & 0\\
\dynkin{0, 2, 0, 1, 0} & \irrep{1176} & 1120 & 2 & 0 & 1 & 0\\
\dynkin{0, 2, 1, 0, 0} & \irrep[1]{1176} & 1204 & 1 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 0, 2} & \irrep{1260} & 1146 & 1 & 0 & 0 & 0\\
\dynkin{8, 0, 0, 0, 0} & \irrep{1287} & 3432 & 2 & 1 & 0 & 0\\
\dynkin{1, 0, 0, 0, 5} & \irrep{1386} & 2112 & 2 & 1 & 0 & 0\\
\dynkin{0, 1, 2, 0, 0} & \irrep{1470} & 1568 & 2 & 0 & 0 & 0\\
\dynkin{4, 0, 0, 1, 0} & \irrep{1575} & 2040 & 2 & 0 & 0 & 0\\
\dynkin{1, 0, 1, 0, 2} & \irrep{1701} & 1620 & 2 & 0 & 0 & 0\\
\dynkin{1, 3, 0, 0, 0} & \irrep{1764} & 2310 & 1 & 0 & 0 & 0\\
\dynkin{0, 4, 0, 0, 0} & \irrep[1]{1764} & 2688 & 2 & 0 & 1 & 0\\
\dynkin{0, 2, 0, 0, 2} & \irrep{1800} & 1920 & 2 & 0 & 0 & 0\\
\dynkin{4, 0, 1, 0, 0} & \irrep[1]{1800} & 2460 & 1 & 0 & 0 & 0\\
\dynkin{0, 1, 1, 1, 0} & \irrep{1960} & 1932 & 3 & 0 & 0 & 2\\
\dynkin{5, 1, 0, 0, 0} & \irrep{1980} & 3498 & 1 & 0 & 0 & 0\\
\dynkin{9, 0, 0, 0, 0} & \irrep{2002} & 6435 & 3 & 1 & 0 & 0\\
\dynkin{1, 0, 0, 2, 1} & \irrep{2205} & 2352 & 2 & 0 & 1 & 0\\
\dynkin{4, 0, 0, 0, 2} & \irrep{2310} & 3256 & 2 & 1 & 0 & 0\\
\dynkin{2, 1, 0, 1, 0} & \irrep{2430} & 2592 & 2 & 0 & 0 & 0\\
\dynkin{2, 1, 1, 0, 0} & \irrep{2520} & 2868 & 1 & 0 & 0 & 0\\
\dynkin{2, 0, 2, 0, 0} & \irrep[1]{2520} & 2976 & 2 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 3, 0} & \irrep[2]{2520} & 3156 & 1 & 0 & 0 & 0\\
\dynkin{3, 2, 0, 0, 0} & \irrep[3]{2520} & 3732 & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 0, 6} & \irrep[4]{2520} & 4884 & 1 & 1 & 0 & 0\\
\dynkin{1, 0, 0, 1, 3} & \irrep{2688} & 3328 & 2 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 0, 3} & \irrep{2695} & 3696 & 0 & \starred{1} & \starred{1} & \starred{1}\\
\dynkincomma{0, 0, 0, 0, 10} & \irrep{3003} & 11440 & 2 & 1 & 0 & 0\\
\dynkin{5, 0, 0, 1, 0} & \irrep{3080} & 5148 & 3 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 1, 1} & \irrep{3200} & 3840 & 0 & 0 & 0 & 0\\
\dynkin{2, 0, 1, 1, 0} & \irrep{3240} & 3564 & 3 & 0 & 0 & 0\\
\dynkin{5, 0, 1, 0, 0} & \irrep{3465} & 6072 & 2 & 0 & 0 & 0\\
\dynkin{6, 1, 0, 0, 0} & \irrep[1]{3465} & 7656 & 2 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 1, 1} & \irrep{3675} & 3780 & 0 & 0 & \starred{1} & \starred{1}\\
\dynkin{2, 1, 0, 0, 2} & \irrep[1]{3675} & 4340 & 2 & 0 & 1 & 0\\
\dynkin{1, 0, 2, 0, 1} & \irrep{3969} & 4536 & 0 & 0 & 0 & 2+\starred{1}\\
\dynkin{0, 0, 4, 0, 0} & \irrep{4116} & 7056 & 0 & 0 & 0 & 4+\starred{1}\\
\dynkin{1, 0, 1, 0, 3} & \irrep{4200} & 5260 & 1 & 0 & 0 & 0\\
\dynkin{7, 0, 0, 0, 1} & \irrep{4290} & 10296 & 0 & 1 & 0 & 0\\
\dynkincomma{0, 0, 0, 0, 11} & \irrep{4368} & 19448 & 1 & 1 & 0 & 0\\
\dynkin{1, 0, 1, 1, 1} & \irrep{4410} & 4767 & 1 & 0 & 0 & 0\\
\dynkin{1, 2, 1, 0, 0} & \irrep[1]{4410} & 5712 & 2 & 0 & 0 & 0\\
\dynkin{0, 0, 3, 0, 1} & \irrep[2]{4410} & 6216 & 2 & 0 & 0 & 0\\
\dynkin{2, 3, 0, 0, 0} & \irrep[3]{4410} & 7224 & 2 & 0 & 0 & 0\\
\dynkin{0, 2, 0, 0, 3} & \irrep{4500} & 6150 & 1 & 0 & 0 & 0\\
\dynkin{1, 2, 0, 1, 0} & \irrep{4536} & 5508 & 3 & 0 & 0 & 0\\
\dynkin{5, 0, 0, 0, 2} & \irrep[1]{4536} & 8100 & 3 & 1 & 0 & 0\\
\dynkin{0, 3, 1, 0, 0} & \irrep{4704} & 7056 & 3 & 0 & 0 & 0\\
\dynkin{4, 2, 0, 0, 0} & \irrep{4950} & 9240 & 2 & 0 & 0 & 0\\
\dynkin{0, 2, 0, 1, 1} & \irrep{5040} & 6024 & 1 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 3, 0} & \irrep[1]{5040} & 7104 & 2 & 0 & 1 & 0\\
\dynkin{2, 0, 1, 0, 2} & \irrep{5292} & 6426 & 3 & 0 & 0 & 2\\
\dynkin{0, 0, 0, 5, 0} & \irrep[1]{5292} & 11088 & 2 & 0 & 1 & 0\\
\dynkin{1, 0, 0, 1, 4} & \irrep{5544} & 8844 & 1 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 6} & \irrep[1]{5544} & 11616 & 2 & 0 & 0 & 0\\
\dynkin{1, 0, 1, 2, 0} & \irrep{5670} & 7128 & 0 & 0 & 0 & 0\\
\dynkin{3, 1, 0, 1, 0} & \irrep[1]{5670} & 7857 & 3 & 0 & 0 & 0\\
\end{longtable}
\newpage
\begin{longtable}{lrrcccc}
\caption{\label{tab:SU7Irreps}SU(7) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(6) & SU(5)${\times}$SU(2) & SU(4)${\times}$SU(3)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Septality & singlets & singlets & singlets\\
\midrule
\endfirsthead
\caption[]{SU(7) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(6) & SU(5)${\times}$SU(2) & SU(4)${\times}$SU(3)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Septality & singlets & singlets & singlets\\
\midrule
\endhead
\multicolumn{7}{l}{\footnotesize $^\ast$SU(6)${\times}$U(1), SU(5)${\times}$SU(2)${\times}$U(1), and SU(4)${\times}$SU(3)${\times}$U(1) singlets, respectively.}
\endfoot
\bottomrule
\multicolumn{7}{l}{\footnotesize $^\ast$SU(6)${\times}$U(1), SU(5)${\times}$SU(2)${\times}$U(1), and SU(4)${\times}$SU(3)${\times}$U(1) singlets, respectively.}
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0} & \irrep{7} & 1 & 1 & 1 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 0} & \irrep{21} & 5 & 2 & 0 & 1 & 0\\
\dynkin{2, 0, 0, 0, 0, 0} & \irrep{28} & 9 & 2 & 1 & 0 & 0\\
\dynkin{0, 0, 1, 0, 0, 0} & \irrep{35} & 10 & 3 & 0 & 0 & 1\\
\dynkin{1, 0, 0, 0, 0, 1} & \irrep{48} & 14 & 0 & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{3, 0, 0, 0, 0, 0} & \irrep{84} & 45 & 3 & 1 & 0 & 0\\
\dynkin{1, 1, 0, 0, 0, 0} & \irrep{112} & 46 & 3 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 1} & \irrep{140} & 55 & 1 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0, 1} & \irrep{189} & 90 & 1 & 1 & 0 & 0\\
\dynkin{0, 0, 0, 0, 2, 0} & \irrep{196} & 105 & 3 & 0 & 1 & 0\\
\dynkin{0, 0, 0, 1, 0, 1} & \irrep{210} & 95 & 3 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 0, 4} & \irrep[1]{210} & 165 & 3 & 1 & 0 & 0\\
\dynkin{0, 0, 1, 0, 0, 1} & \irrep{224} & 100 & 2 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 1, 2} & \irrep{378} & 234 & 3 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 1, 0} & \irrep{392} & 196 & 0 & 0 & \starred{1} & \starred{1}\\
\dynkin{0, 0, 0, 0, 0, 5} & \irrep{462} & 495 & 2 & 1 & 0 & 0\\
\dynkin{0, 0, 0, 1, 1, 0} & \irrep{490} & 280 & 2 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 2, 0, 0} & \irrep[1]{490} & 315 & 1 & 0 & 0 & 1\\
\dynkin{2, 0, 0, 0, 1, 0} & \irrep{540} & 315 & 0 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 0, 0, 1} & \irrep{560} & 390 & 2 & 1 & 0 & 0\\
\dynkin{0, 0, 1, 0, 1, 0} & \irrep{588} & 329 & 1 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 0, 0, 1} & \irrep{735} & 420 & 2 & 0 & 1 & 0\\
\dynkin{2, 0, 0, 0, 0, 2} & \irrep[1]{735} & 490 & 0 & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{0, 0, 0, 1, 0, 2} & \irrep{756} & 495 & 2 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 1, 0, 0} & \irrep{784} & 490 & 0 & 0 & 0 & \starred{1}\\
\dynkin{0, 0, 1, 0, 0, 2} & \irrep{840} & 540 & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 2, 1} & \irrep{882} & 651 & 2 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 0, 6} & \irrep{924} & 1287 & 1 & 1 & 0 & 0\\
\dynkin{0, 0, 0, 0, 1, 3} & \irrep{1008} & 870 & 2 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 3, 0} & \irrep{1176} & 1050 & 1 & 0 & 1 & 0\\
\dynkin{0, 2, 0, 0, 0, 1} & \irrep{1260} & 885 & 3 & 0 & 0 & 0\\
\dynkin{1, 0, 1, 0, 0, 1} & \irrep{1323} & 819 & 3 & 0 & 0 & 1\\
\dynkin{4, 0, 0, 0, 0, 1} & \irrep{1386} & 1320 & 3 & 1 & 0 & 0\\
\dynkin{3, 0, 0, 0, 1, 0} & \irrep{1575} & 1275 & 1 & 0 & 0 & 0\\
\dynkin{7, 0, 0, 0, 0, 0} & \irrep{1716} & 3003 & 0 & 1 & 0 & 0\\
\dynkin{1, 1, 0, 0, 1, 0} & \irrep{2016} & 1380 & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 1, 0, 3} & \irrep{2100} & 1875 & 1 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 0, 0, 2} & \irrep{2156} & 1925 & 1 & 1 & 0 & 0\\
\dynkin{0, 0, 0, 0, 1, 4} & \irrep{2310} & 2640 & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 1, 1, 1} & \irrep{2352} & 1806 & 1 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 1, 0, 0} & \irrep{2400} & 2100 & 0 & 0 & 0 & 0\\
\dynkin{2, 1, 0, 0, 0, 1} & \irrep{2450} & 1925 & 3 & 0 & 0 & 0\\
\dynkin{1, 0, 2, 0, 0, 0} & \irrep{2646} & 2205 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 2, 2} & \irrep[1]{2646} & 2583 & 1 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 0, 0, 2} & \irrep{2800} & 2150 & 1 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 1, 0, 0} & \irrep{2940} & 2205 & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 2, 0, 0} & \irrep[1]{2940} & 2415 & 2 & 0 & 0 & 0\\
\end{longtable}
\newpage
\setlength{\tabcolsep}{5.5pt}
\begin{longtable}{lrrccccc}
\caption{\label{tab:SU8Irreps}SU(8) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(7) & SU(6)${\times}$SU(2) & SU(5)${\times}$SU(3) & SU(4)${\times}$SU(4)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Octality & singlets & singlets & singlets & singlets\\
\midrule
\endfirsthead
\caption[]{SU(8) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(7) & SU(6)${\times}$SU(2) & SU(5)${\times}$SU(3) & SU(4)${\times}$SU(4)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Octality & singlets & singlets & singlets & singlets\\
\midrule
\endhead
\multicolumn{8}{l}{\footnotesize $^\ast$SU(7)${\times}$U(1), SU(6)${\times}$SU(2)${\times}$U(1), SU(5)${\times}$SU(3)${\times}$U(1), and SU(4)${\times}$SU(4)${\times}$U(1) singlets, respectively.}
\endfoot
\bottomrule
\multicolumn{8}{l}{\footnotesize $^\ast$SU(7)${\times}$U(1), SU(6)${\times}$SU(2)${\times}$U(1), SU(5)${\times}$SU(3)${\times}$U(1), and SU(4)${\times}$SU(4)${\times}$U(1) singlets, respectively.}
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0, 0} & \irrep{8} & 1 & 1 & 1 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0} & \irrep{28} & 6 & 2 & 0 & 1 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0} & \irrep{36} & 10 & 2 & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 0, 0, 0, 0} & \irrep{56} & 15 & 3 & 0 & 0 & 1 & 0\\
\dynkin{1, 0, 0, 0, 0, 0, 1} & \irrep{63} & 16 & 0 & \starred{1} & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{0, 0, 0, 1, 0, 0, 0} & \irrep{70} & 20 & 4 & 0 & 0 & 0 & 2\\
\dynkin{3, 0, 0, 0, 0, 0, 0} & \irrep{120} & 55 & 3 & 1 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 0, 0, 0, 0} & \irrep{168} & 61 & 3 & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 1} & \irrep{216} & 75 & 1 & 0 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 1} & \irrep{280} & 115 & 1 & 1 & 0 & 0 & 0\\
\dynkin{4, 0, 0, 0, 0, 0, 0} & \irrep{330} & 220 & 4 & 1 & 0 & 0 & 0\\
\dynkin{0, 2, 0, 0, 0, 0, 0} & \irrep{336} & 160 & 4 & 0 & 1 & 0 & 0\\
\dynkin{1, 0, 1, 0, 0, 0, 0} & \irrep{378} & 156 & 4 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 0, 0, 0, 1} & \irrep{420} & 170 & 2 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 1, 0, 0, 1} & \irrep{504} & 215 & 3 & 0 & 0 & 0 & 0\\
\dynkin{2, 1, 0, 0, 0, 0, 0} & \irrep{630} & 340 & 4 & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 1, 0} & \irrep{720} & 320 & 0 & 0 & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{0, 0, 0, 0, 0, 0, 5} & \irrep{792} & 715 & 3 & 1 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 0, 0, 0, 1} & \irrep{924} & 550 & 2 & 1 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0, 1, 0} & \irrep{945} & 480 & 0 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 1, 1, 0} & \irrep{1008} & 526 & 3 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 2, 0, 0} & \irrep{1176} & 700 & 2 & 0 & 0 & 1 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 2} & \irrep{1232} & 704 & 0 & \starred{1} & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{1, 1, 0, 0, 0, 0, 1} & \irrep{1280} & 640 & 2 & 0 & 1 & 0 & 0\\
\dynkin{0, 0, 1, 0, 0, 1, 0} & \irrep{1344} & 680 & 1 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 1, 0, 1, 0} & \irrep{1512} & 804 & 2 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 1, 0, 2} & \irrep[1]{1512} & 885 & 3 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 0, 2, 1} & \irrep{1680} & 1090 & 3 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 0, 0, 6} & \irrep{1716} & 2002 & 2 & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 2, 0, 0, 0} & \irrep{1764} & 1120 & 0 & 0 & 0 & 0 & 2+\starred{1}\\
\dynkin{0, 0, 1, 0, 0, 0, 2} & \irrep{1800} & 1025 & 1 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 0, 1, 3} & \irrep{1848} & 1375 & 3 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 1, 0, 0, 2} & \irrep{2100} & 1250 & 2 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 0, 1, 0, 0} & \irrep{2352} & 1344 & 0 & 0 & 0 & \starred{1} & \starred{1}\\
\dynkin{0, 0, 0, 1, 1, 0, 0} & \irrep[1]{2352} & 1414 & 1 & 0 & 0 & 0 & 0\\
\dynkin{0, 2, 0, 0, 0, 0, 1} & \irrep{2520} & 1555 & 3 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 0, 3, 0} & \irrep[1]{2520} & 1980 & 2 & 0 & 1 & 0 & 0\\
\dynkin{4, 0, 0, 0, 0, 0, 1} & \irrep[2]{2520} & 2035 & 3 & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 1, 0, 0, 0, 1} & \irrep{2800} & 1550 & 3 & 0 & 0 & 1 & 0\\
\dynkin{3, 0, 0, 0, 0, 1, 0} & \irrep{3080} & 2145 & 1 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 0, 0, 7} & \irrep{3432} & 5005 & 1 & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 1, 0, 0, 1} & \irrep{3584} & 2048 & 4 & 0 & 0 & 0 & 2\\
\dynkin{3, 0, 0, 0, 0, 0, 2} & \irrep{4032} & 3064 & 1 & 1 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 0, 0, 1, 0} & \irrep{4200} & 2525 & 1 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 1, 0, 3} & \irrep{4620} & 3630 & 2 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 0, 1, 4} & \irrep[1]{4620} & 4510 & 2 & 0 & 0 & 0 & 0\\
\dynkin{2, 1, 0, 0, 0, 0, 1} & \irrep{4752} & 3234 & 3 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 1, 1, 1} & \irrep{5376} & 3712 & 2 & 0 & 0 & 0 & 0\\
\end{longtable}
\newpage
\setlength{\tabcolsep}{5pt}
\enlargethispage{20pt}
{
\begin{longtable}{lrrccccc}
\caption{\label{tab:SU9Irreps}SU(9) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(8) & SU(7)${\times}$SU(2) & SU(6)${\times}$SU(3) & SU(5)${\times}$SU(4)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Nonality & singlets & singlets & singlets & singlets\\
\midrule
\endfirsthead
\caption[]{SU(9) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(8) & SU(7)${\times}$SU(2) & SU(6)${\times}$SU(3) & SU(5)${\times}$SU(4)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Nonality & singlets & singlets & singlets & singlets\\
\midrule
\endhead
\multicolumn{8}{l}{\footnotesize $^\ast$SU(8)${\times}$U(1), SU(7)${\times}$SU(2)${\times}$U(1), SU(6)${\times}$SU(3)${\times}$U(1), and SU(5)${\times}$SU(4)${\times}$U(1) singlets, respectively.}
\endfoot
\bottomrule
\multicolumn{8}{l}{\footnotesize $^\ast$SU(8)${\times}$U(1), SU(7)${\times}$SU(2)${\times}$U(1), SU(6)${\times}$SU(3)${\times}$U(1), and SU(5)${\times}$SU(4)${\times}$U(1) singlets, respectively.}
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0, 0, 0} & \irrep{9} & 1 & 1 & 1 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 0} & \irrep{36} & 7 & 2 & 0 & 1 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0, 0} & \irrep{45} & 11 & 2 & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 0, 0, 0, 0, 1} & \irrep{80} & 18 & 0 & \starred{1} & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{0, 0, 1, 0, 0, 0, 0, 0} & \irrep{84} & 21 & 3 & 0 & 0 & 1 & 0\\
\dynkin{0, 0, 0, 1, 0, 0, 0, 0} & \irrep{126} & 35 & 4 & 0 & 0 & 0 & 1\\
\dynkin{3, 0, 0, 0, 0, 0, 0, 0} & \irrep{165} & 66 & 3 & 1 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 0, 0, 0, 0, 0} & \irrep{240} & 78 & 3 & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 1} & \irrep{315} & 98 & 1 & 0 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0, 1} & \irrep{396} & 143 & 1 & 1 & 0 & 0 & 0\\
\dynkin{4, 0, 0, 0, 0, 0, 0, 0} & \irrep{495} & 286 & 4 & 1 & 0 & 0 & 0\\
\dynkin{0, 2, 0, 0, 0, 0, 0, 0} & \irrep{540} & 231 & 4 & 0 & 1 & 0 & 0\\
\dynkin{1, 0, 1, 0, 0, 0, 0, 0} & \irrep{630} & 238 & 4 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 0, 0, 0, 0, 1} & \irrep{720} & 266 & 2 & 0 & 0 & 0 & 0\\
\dynkin{2, 1, 0, 0, 0, 0, 0, 0} & \irrep{990} & 473 & 4 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 1, 0, 0, 1} & \irrep{1008} & 406 & 4 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 1, 0, 0, 0, 1} & \irrep{1050} & 420 & 3 & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 1, 0} & \irrep{1215} & 486 & 0 & 0 & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{0, 0, 0, 0, 0, 0, 0, 5} & \irrep{1287} & 1001 & 4 & 1 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 0, 0, 0, 0, 1} & \irrep{1440} & 748 & 2 & 1 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 1, 0} & \irrep{1540} & 693 & 0 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 0, 1, 1, 0} & \irrep{1890} & 903 & 4 & 0 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0, 2} & \irrep{1944} & 972 & 0 & \starred{1} & \starred{1} & \starred{1} & \starred{1}\\
\end{longtable}
\vspace{-20pt}
\setlength{\tabcolsep}{1pt}
\begin{longtable}{lrrcccccc}
\caption{\label{tab:SU10Irreps}SU(10) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(9) & SU(8)${\times}$SU(2) & SU(7)${\times}$SU(3) & SU(6)${\times}$SU(4) & SU(5)${\times}$SU(5)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Decality & singlets & singlets & singlets & singlets & singlets\\
\midrule
\endfirsthead
\caption[]{SU(10) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(9) & SU(8)${\times}$SU(2) & SU(7)${\times}$SU(3) & SU(6)${\times}$SU(4) & SU(5)${\times}$SU(5)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Decality & singlets & singlets & singlets & singlets & singlets\\
\midrule
\endhead
\multicolumn{9}{>{\raggedright\arraybackslash}p{0.8\textwidth}}{\footnotesize $^\ast$SU(9)${\times}$U(1) and SU(8)${\times}$SU(2)${\times}$U(1) and SU(7)${\times}$SU(3)${\times}$U(1) and SU(6)${\times}$SU(4)${\times}$U(1) and SU(5)${\times}$SU(5)${\times}$U(1) singlets resp.}
\endfoot
\bottomrule
\multicolumn{9}{>{\raggedright\arraybackslash}p{0.8\textwidth}}{\footnotesize $^\ast$SU(9)${\times}$U(1) and SU(8)${\times}$SU(2)${\times}$U(1) and SU(7)${\times}$SU(3)${\times}$U(1) and SU(6)${\times}$SU(4)${\times}$U(1) and SU(5)${\times}$SU(5)${\times}$U(1) singlets resp.}
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{10} & 1 & 1 & 1 & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 0, 0} & \irrep{45} & 8 & 2 & 0 & 1 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{55} & 12 & 2 & 1 & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 0, 0, 0, 0, 0, 1} & \irrep{99} & 20 & 0 & \starred{1} & \starred{1} & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{0, 0, 1, 0, 0, 0, 0, 0, 0} & \irrep{120} & 28 & 3 & 0 & 0 & 1 & 0 & 0\\
\dynkin{0, 0, 0, 1, 0, 0, 0, 0, 0} & \irrep{210} & 56 & 4 & 0 & 0 & 0 & 1 & 0\\
\dynkin{3, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{220} & 78 & 3 & 1 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 1, 0, 0, 0, 0} & \irrep{252} & 70 & 5 & 0 & 0 & 0 & 0 & 2\\
\dynkin{1, 1, 0, 0, 0, 0, 0, 0, 0} & \irrep{330} & 97 & 3 & 0 & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 0, 1} & \irrep{440} & 124 & 1 & 0 & 0 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0, 0, 1} & \irrep{540} & 174 & 1 & 1 & 0 & 0 & 0 & 0\\
\dynkin{4, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{715} & 364 & 4 & 1 & 0 & 0 & 0 & 0\\
\dynkin{0, 2, 0, 0, 0, 0, 0, 0, 0} & \irrep{825} & 320 & 4 & 0 & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 1, 0, 0, 0, 0, 0, 0} & \irrep{990} & 344 & 4 & 0 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 0, 0, 0, 0, 0, 1} & \irrep{1155} & 392 & 2 & 0 & 0 & 0 & 0 & 0\\
\dynkin{2, 1, 0, 0, 0, 0, 0, 0, 0} & \irrep{1485} & 636 & 4 & 0 & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 1, 0, 0, 0, 0, 0} & \irrep{1848} & 700 & 5 & 0 & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 1, 0} & \irrep{1925} & 700 & 0 & 0 & \starred{1} & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{0, 0, 0, 1, 0, 0, 0, 0, 1} & \irrep{1980} & 742 & 3 & 0 & 0 & 0 & 0 & 0\\
\end{longtable}
}
\newpage
\setlength{\tabcolsep}{0.5pt}
\begin{longtable}{lrrcccccc}
\caption{\label{tab:SU11Irreps}SU(11) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(10) & SU(9)${\times}$SU(2) & SU(8)${\times}$SU(3) & SU(7)${\times}$SU(4) & SU(6)${\times}$SU(5)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Undecality & singlets & singlets & singlets & singlets & singlets\\
\midrule
\endfirsthead
\caption[]{SU(11) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(10) & SU(9)${\times}$SU(2) & SU(8)${\times}$SU(3) & SU(7)${\times}$SU(4) & SU(6)${\times}$SU(5)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Undecality & singlets & singlets & singlets & singlets & singlets\\
\midrule
\endhead
\multicolumn{9}{>{\raggedright\arraybackslash}p{0.8\textwidth}}{\footnotesize $^\ast$SU(10)${\times}$U(1) and SU(9)${\times}$SU(2)${\times}$U(1) and SU(8)${\times}$SU(3)${\times}$U(1) and SU(7)${\times}$SU(4)${\times}$U(1) and SU(6)${\times}$SU(5)${\times}$U(1) singlets resp.}
\endfoot
\bottomrule
\multicolumn{9}{>{\raggedright\arraybackslash}p{0.8\textwidth}}{\footnotesize $^\ast$SU(10)${\times}$U(1) and SU(9)${\times}$SU(2)${\times}$U(1) and SU(8)${\times}$SU(3)${\times}$U(1) and SU(7)${\times}$SU(4)${\times}$U(1) and SU(6)${\times}$SU(5)${\times}$U(1) singlets resp.}
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{11} & 1 & 1 & 1 & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{55} & 9 & 2 & 0 & 1 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{66} & 13 & 2 & 1 & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 0, 0, 0, 0, 0, 0, 1} & \irrep{120} & 22 & 0 & \starred{1} & \starred{1} & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{0, 0, 1, 0, 0, 0, 0, 0, 0, 0} & \irrep{165} & 36 & 3 & 0 & 0 & 1 & 0 & 0\\
\dynkin{3, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{286} & 91 & 3 & 1 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 1, 0, 0, 0, 0, 0, 0} & \irrep{330} & 84 & 4 & 0 & 0 & 0 & 1 & 0\\
\dynkin{1, 1, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{440} & 118 & 3 & 0 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 1, 0, 0, 0, 0, 0} & \irrep{462} & 126 & 5 & 0 & 0 & 0 & 0 & 1\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 0, 0, 1} & \irrep{594} & 153 & 1 & 0 & 0 & 0 & 0 & 0\\
\end{longtable}
\begin{longtable}{lrrcccccc}
\caption{\label{tab:SU12Irreps}SU(12) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(11) & SU(10)${\times}$SU(2) & SU(9)${\times}$SU(3) & SU(7)${\times}$SU(5) & SU(6)${\times}$SU(6)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Duodecality & singlets & singlets & singlets & singlets & singlets\\
\midrule
\endfirsthead
\caption[]{SU(12) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & & SU(11) & SU(10)${\times}$SU(2) & SU(9)${\times}$SU(3) & SU(7)${\times}$SU(5) & SU(6)${\times}$SU(6)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Duodecality & singlets & singlets & singlets & singlets & singlets\\
\midrule
\endhead
\multicolumn{9}{>{\raggedright\arraybackslash}p{0.8\textwidth}}{\footnotesize $^\ast$SU(11)${\times}$U(1) and SU(10)${\times}$SU(2)${\times}$U(1) and SU(9)${\times}$SU(3)${\times}$U(1) and SU(7)${\times}$SU(5)${\times}$U(1) and SU(6)${\times}$SU(6)${\times}$U(1) singlets resp.}
\endfoot
\bottomrule
\multicolumn{9}{>{\raggedright\arraybackslash}p{0.8\textwidth}}{\footnotesize $^\ast$SU(11)${\times}$U(1) and SU(10)${\times}$SU(2)${\times}$U(1) and SU(9)${\times}$SU(3)${\times}$U(1) and SU(7)${\times}$SU(5)${\times}$U(1) and SU(6)${\times}$SU(6)${\times}$U(1) singlets resp.}
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{12} & 1 & 1 & 1 & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{66} & 10 & 2 & 0 & 1 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{78} & 14 & 2 & 1 & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1} & \irrep{143} & 24 & 0 & \starred{1} & \starred{1} & \starred{1} & \starred{1} & \starred{1}\\
\dynkin{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{220} & 45 & 3 & 0 & 0 & 1 & 0 & 0\\
\dynkin{3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{364} & 105 & 3 & 1 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0} & \irrep{495} & 120 & 4 & 0 & 0 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{572} & 141 & 3 & 0 & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1} & \irrep{780} & 185 & 1 & 0 & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0} & \irrep{792} & 210 & 5 & 0 & 0 & 0 & 1 & 0\\
\end{longtable}
\newpage
\subsubsection{\SO{N}}
\setlength{\tabcolsep}{7pt}
\begin{longtable}{lrrcc}
\caption{\label{tab:SO7Irreps}SO(7) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SU(4)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets\\
\midrule
\endfirsthead
\caption[]{SO(7) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SU(4)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0} & \irrep{7} & 1 & 0 & 1\\
\dynkin{0, 0, 1} & \irrep{8} & 1 & 1 & 0\\
\dynkin{0, 1, 0} & \irrep{21} & 5 & 0 & 0\\
\dynkin{2, 0, 0} & \irrep{27} & 9 & 0 & 1\\
\dynkin{0, 0, 2} & \irrep{35} & 10 & 0 & 0\\
\dynkin{1, 0, 1} & \irrep{48} & 14 & 1 & 0\\
\dynkin{3, 0, 0} & \irrep{77} & 44 & 0 & 1\\
\dynkin{1, 1, 0} & \irrep{105} & 45 & 0 & 0\\
\dynkin{0, 1, 1} & \irrep{112} & 46 & 1 & 0\\
\dynkin{0, 0, 3} & \irrep[1]{112} & 54 & 1 & 0\\
\dynkin{2, 0, 1} & \irrep{168} & 85 & 1 & 0\\
\dynkin{0, 2, 0} & \irrep[1]{168} & 96 & 0 & 0\\
\dynkin{4, 0, 0} & \irrep{182} & 156 & 0 & 1\\
\dynkin{1, 0, 2} & \irrep{189} & 90 & 0 & 0\\
\dynkin{0, 0, 4} & \irrep{294} & 210 & 0 & 0\\
\dynkin{2, 1, 0} & \irrep{330} & 220 & 0 & 0\\
\dynkin{0, 1, 2} & \irrep{378} & 234 & 0 & 0\\
\dynkin{5, 0, 0} & \irrep[1]{378} & 450 & 0 & 1\\
\dynkin{3, 0, 1} & \irrep{448} & 344 & 1 & 0\\
\dynkin{1, 1, 1} & \irrep{512} & 320 & 1 & 0\\
\dynkin{1, 0, 3} & \irrep{560} & 390 & 1 & 0\\
\dynkin{2, 0, 2} & \irrep{616} & 440 & 0 & 0\\
\dynkin{0, 0, 5} & \irrep{672} & 660 & 1 & 0\\
\dynkin{1, 2, 0} & \irrep{693} & 561 & 0 & 0\\
\dynkin{6, 0, 0} & \irrep{714} & 1122 & 0 & 1\\
\dynkin{0, 2, 1} & \irrep{720} & 570 & 1 & 0\\
\dynkin{3, 1, 0} & \irrep{819} & 780 & 0 & 0\\
\dynkin{0, 3, 0} & \irrep{825} & 825 & 0 & 0\\
\dynkin{0, 1, 3} & \irrep{1008} & 870 & 1 & 0\\
\dynkin{4, 0, 1} & \irrep[1]{1008} & 1086 & 1 & 0\\
\dynkin{7, 0, 0} & \irrep{1254} & 2508 & 0 & 1\\
\dynkin{1, 0, 4} & \irrep{1386} & 1320 & 0 & 0\\
\dynkin{0, 0, 6} & \irrep[1]{1386} & 1782 & 0 & 0\\
\dynkin{2, 1, 1} & \irrep{1512} & 1341 & 1 & 0\\
\dynkin{3, 0, 2} & \irrep{1560} & 1560 & 0 & 0\\
\dynkin{1, 1, 2} & \irrep{1617} & 1386 & 0 & 0\\
\dynkin{2, 0, 3} & \irrep{1728} & 1656 & 1 & 0\\
\dynkin{4, 1, 0} & \irrep{1750} & 2250 & 0 & 0\\
\dynkin{2, 2, 0} & \irrep{1911} & 2093 & 0 & 0\\
\dynkin{5, 0, 1} & \irrep{2016} & 2892 & 1 & 0\\
\dynkin{0, 2, 2} & \irrep{2079} & 2178 & 0 & 0\\
\dynkin{8, 0, 0} & \irrep[1]{2079} & 5148 & 0 & 1\\
\dynkin{0, 1, 4} & \irrep{2310} & 2640 & 0 & 0\\
\dynkin{0, 0, 7} & \irrep{2640} & 4290 & 1 & 0\\
\dynkin{1, 2, 1} & \irrep{2800} & 2950 & 1 & 0\\
\dynkin{1, 3, 0} & \irrep{3003} & 3861 & 0 & 0\\
\dynkin{0, 4, 0} & \irrep[1]{3003} & 4576 & 0 & 0\\
\dynkin{1, 0, 5} & \irrep{3024} & 3762 & 1 & 0\\
\dynkin{0, 3, 1} & \irrep{3080} & 3905 & 1 & 0\\
\dynkin{9, 0, 0} & \irrep{3289} & 9867 & 0 & 1\\
\dynkin{5, 1, 0} & \irrep{3366} & 5610 & 0 & 0\\
\dynkin{4, 0, 2} & \irrep{3375} & 4500 & 0 & 0\\
\dynkin{3, 1, 1} & \irrep{3584} & 4288 & 1 & 0\\
\dynkin{6, 0, 1} & \irrep{3696} & 6798 & 1 & 0\\
\dynkin{2, 0, 4} & \irrep{4095} & 5070 & 0 & 0\\
\dynkin{1, 1, 3} & \irrep{4096} & 4608 & 1 & 0\\
\dynkin{3, 0, 3} & \irrep{4200} & 5325 & 1 & 0\\
\dynkin{3, 2, 0} & \irrep{4312} & 6160 & 0 & 0\\
\dynkin{2, 1, 2} & \irrep{4550} & 5200 & 0 & 0\\
\dynkin{0, 0, 8} & \irrep{4719} & 9438 & 0 & 0\\
\dynkin{0, 1, 5} & \irrep{4752} & 6930 & 1 & 0\\
\dynkin{0, 2, 3} & \irrep{4928} & 6600 & 1 & 0\\
\dynkincomma{10, 0, 0} & \irrep{5005} & 17875 & 0 & 1\\
\dynkin{6, 1, 0} & \irrep{5985} & 12540 & 0 & 0\\
\dynkin{1, 0, 6} & \irrep{6006} & 9438 & 0 & 0\\
\dynkin{7, 0, 1} & \irrep{6336} & 14520 & 1 & 0\\
\dynkin{5, 0, 2} & \irrep{6545} & 11220 & 0 & 0\\
\dynkincomma{11, 0, 0} & \irrep{7371} & 30888 & 0 & 1\\
\dynkin{2, 2, 1} & \irrep{7392} & 10076 & 1 & 0\\
\dynkin{4, 1, 1} & \irrep[1]{7392} & 11484 & 1 & 0\\
\dynkin{2, 3, 0} & \irrep{7560} & 12240 & 0 & 0\\
\dynkin{1, 2, 2} & \irrep{7722} & 10296 & 0 & 0\\
\dynkin{0, 3, 2} & \irrep{8008} & 12584 & 0 & 0\\
\dynkin{0, 0, 9} & \irrep[1]{8008} & 19305 & 1 & 0\\
\dynkin{4, 2, 0} & \irrep{8568} & 15504 & 0 & 0\\
\dynkin{2, 0, 5} & \irrep{8624} & 13398 & 1 & 0\\
\dynkin{4, 0, 3} & \irrep{8800} & 14300 & 1 & 0\\
\dynkin{0, 5, 0} & \irrep{8918} & 19110 & 0 & 0\\
\dynkin{1, 1, 4} & \irrep{9009} & 12870 & 0 & 0\\
\dynkin{0, 1, 6} & \irrep[1]{9009} & 16302 & 0 & 0\\
\dynkin{3, 0, 4} & \irrep{9625} & 15125 & 0 & 0\\
\dynkin{1, 4, 0} & \irrep{10010} & 18590 & 0 & 0\\
\dynkin{7, 1, 0} & \irrep[1]{10010} & 25740 & 0 & 0\\
\dynkin{0, 4, 1} & \irrep{10192} & 18746 & 1 & 0\\
\dynkin{0, 2, 4} & \irrep{10296} & 17160 & 0 & 0\\
\dynkin{8, 0, 1} & \irrep[1]{10296} & 28743 & 1 & 0\\
\dynkin{3, 1, 2} & \irrep{10395} & 15345 & 0 & 0\\
\dynkincomma{12, 0, 0} & \irrep{10556} & 51272 & 0 & 1\\
\dynkin{1, 3, 1} & \irrep{10752} & 16960 & 1 & 0\\
\dynkin{2, 1, 3} & \irrep{11088} & 15906 & 1 & 0\\
\dynkin{1, 0, 7} & \irrep[1]{11088} & 21450 & 1 & 0\\
\dynkin{6, 0, 2} & \irrep{11704} & 25080 & 0 & 0\\
\dynkincomma{0, 0, 10} & \irrep{13013} & 37180 & 0 & 0\\
\dynkin{5, 1, 1} & \irrep{13824} & 27072 & 1 & 0\\
\dynkincomma{13, 0, 0} & \irrep{14756} & 82212 & 0 & 1\\
\dynkin{5, 2, 0} & \irrep{15561} & 34827 & 0 & 0\\
\dynkin{3, 3, 0} & \irrep{15912} & 31824 & 0 & 0\\
\end{longtable}
\newpage
\begin{longtable}{lrrcc}
\caption{\label{tab:SO8Irreps}SO(8) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SO(7)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets\\
\midrule
\endfirsthead
\caption[]{SO(8) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SO(7)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{0, 0, 0, 1} & \irrepsub{8}{s} & 1 & (10) & 1\\
\dynkin{1, 0, 0, 0} & \irrepsub{8}{v} & 1 & (02) & 0\\
\dynkin{0, 0, 1, 0} & \irrepsub{8}{c} & 1 & (12) & 0\\
\dynkin{0, 1, 0, 0} & \irrep{28} & 6 & (00) & 0\\
\dynkin{2, 0, 0, 0} & \irrepsub{35}{v} & 10 & (00) & 0\\
\dynkin{0, 0, 2, 0} & \irrepsub{35}{c} & 10 & (00) & 0\\
\dynkin{0, 0, 0, 2} & \irrepsub{35}{s} & 10 & (00) & 1\\
\dynkin{1, 0, 1, 0} & \irrepsub{56}{s} & 15 & (10) & 0\\
\dynkin{0, 0, 1, 1} & \irrepsub{56}{v} & 15 & (02) & 0\\
\dynkin{1, 0, 0, 1} & \irrepsub{56}{c} & 15 & (12) & 0\\
\dynkin{0, 0, 0, 3} & \irrepsub{112}{s} & 54 & (10) & 1\\
\dynkin{3, 0, 0, 0} & \irrepsub{112}{v} & 54 & (02) & 0\\
\dynkin{0, 0, 3, 0} & \irrepsub{112}{c} & 54 & (12) & 0\\
\dynkin{0, 1, 0, 1} & \irrepsub{160}{s} & 60 & (10) & 0\\
\dynkin{1, 1, 0, 0} & \irrepsub{160}{v} & 60 & (02) & 0\\
\dynkin{0, 1, 1, 0} & \irrepsub{160}{c} & 60 & (12) & 0\\
\dynkin{2, 0, 0, 1} & \irrepsub{224}{vs} & 100 & (10) & 0\\
\dynkin{0, 0, 2, 1} & \irrepsub{224}{cs} & 100 & (10) & 0\\
\dynkin{1, 0, 2, 0} & \irrepsub{224}{cv} & 100 & (02) & 0\\
\dynkin{1, 0, 0, 2} & \irrepsub{224}{sv} & 100 & (02) & 0\\
\dynkin{2, 0, 1, 0} & \irrepsub{224}{vc} & 100 & (12) & 0\\
\dynkin{0, 0, 1, 2} & \irrepsub{224}{sc} & 100 & (12) & 0\\
\dynkin{4, 0, 0, 0} & \irrepsub{294}{v} & 210 & (00) & 0\\
\dynkin{0, 0, 4, 0} & \irrepsub{294}{c} & 210 & (00) & 0\\
\dynkin{0, 0, 0, 4} & \irrepsub{294}{s} & 210 & (00) & 1\\
\dynkin{0, 2, 0, 0} & \irrep{300} & 150 & (00) & 0\\
\dynkin{1, 0, 1, 1} & \irrep{350} & 150 & (00) & 0\\
\dynkin{2, 1, 0, 0} & \irrepsub{567}{v} & 324 & (00) & 0\\
\dynkin{0, 1, 2, 0} & \irrepsub{567}{c} & 324 & (00) & 0\\
\dynkin{0, 1, 0, 2} & \irrepsub{567}{s} & 324 & (00) & 0\\
\dynkin{3, 0, 1, 0} & \irrepsub{672}{vc} & 444 & (10) & 0\\
\dynkin{1, 0, 3, 0} & \irrepsub{672}{cv} & 444 & (10) & 0\\
\dynkin{0, 0, 3, 1} & \irrepsub{672}{cs} & 444 & (02) & 0\\
\dynkin{0, 0, 1, 3} & \irrepsub{672}{sc} & 444 & (02) & 0\\
\dynkin{3, 0, 0, 1} & \irrepsub{672}{vs} & 444 & (12) & 0\\
\dynkin{1, 0, 0, 3} & \irrepsub{672}{sv} & 444 & (12) & 0\\
\dynkin{0, 0, 0, 5} & \irrepsub[1]{672}{s} & 660 & (10) & 1\\
\dynkin{5, 0, 0, 0} & \irrepsub[1]{672}{v} & 660 & (02) & 0\\
\dynkin{0, 0, 5, 0} & \irrepsub[1]{672}{c} & 660 & (12) & 0\\
\dynkin{1, 1, 1, 0} & \irrepsub{840}{s} & 465 & (10) & 0\\
\dynkin{0, 1, 1, 1} & \irrepsub{840}{v} & 465 & (02) & 0\\
\dynkin{1, 1, 0, 1} & \irrepsub{840}{c} & 465 & (12) & 0\\
\dynkin{2, 0, 2, 0} & \irrepsub[1]{840}{s} & 540 & (00) & 0\\
\dynkin{2, 0, 0, 2} & \irrepsub[1]{840}{c} & 540 & (00) & 0\\
\dynkin{0, 0, 2, 2} & \irrepsub[1]{840}{v} & 540 & (00) & 0\\
\dynkin{1, 0, 1, 2} & \irrepsub{1296}{s} & 810 & (10) & 0\\
\dynkin{2, 0, 1, 1} & \irrepsub{1296}{v} & 810 & (02) & 0\\
\dynkin{1, 0, 2, 1} & \irrepsub{1296}{c} & 810 & (12) & 0\\
\dynkin{6, 0, 0, 0} & \irrepsub{1386}{v} & 1782 & (00) & 0\\
\dynkin{0, 0, 6, 0} & \irrepsub{1386}{c} & 1782 & (00) & 0\\
\dynkin{0, 0, 0, 6} & \irrepsub{1386}{s} & 1782 & (00) & 1\\
\dynkin{0, 2, 0, 1} & \irrepsub{1400}{s} & 975 & (10) & 0\\
\dynkin{1, 2, 0, 0} & \irrepsub{1400}{v} & 975 & (02) & 0\\
\dynkin{0, 2, 1, 0} & \irrepsub{1400}{c} & 975 & (12) & 0\\
\dynkin{0, 1, 0, 3} & \irrepsub{1568}{s} & 1260 & (10) & 0\\
\dynkin{3, 1, 0, 0} & \irrepsub{1568}{v} & 1260 & (02) & 0\\
\dynkin{0, 1, 3, 0} & \irrepsub{1568}{c} & 1260 & (12) & 0\\
\dynkin{4, 0, 0, 1} & \irrepsub{1680}{vs} & 1530 & (10) & 0\\
\dynkin{0, 0, 4, 1} & \irrepsub{1680}{cs} & 1530 & (10) & 0\\
\dynkin{1, 0, 4, 0} & \irrepsub{1680}{cv} & 1530 & (02) & 0\\
\dynkin{1, 0, 0, 4} & \irrepsub{1680}{sv} & 1530 & (02) & 0\\
\dynkin{4, 0, 1, 0} & \irrepsub{1680}{vc} & 1530 & (12) & 0\\
\dynkin{0, 0, 1, 4} & \irrepsub{1680}{sc} & 1530 & (12) & 0\\
\dynkin{0, 3, 0, 0} & \irrep{1925} & 1650 & (00) & 0\\
\dynkin{2, 0, 0, 3} & \irrepsub{2400}{sv} & 2100 & (10) & 0\\
\dynkin{0, 0, 2, 3} & \irrepsub{2400}{sc} & 2100 & (10) & 0\\
\dynkin{3, 0, 2, 0} & \irrepsub{2400}{vc} & 2100 & (02) & 0\\
\dynkin{3, 0, 0, 2} & \irrepsub{2400}{vs} & 2100 & (02) & 0\\
\dynkin{2, 0, 3, 0} & \irrepsub{2400}{cv} & 2100 & (12) & 0\\
\dynkin{0, 0, 3, 2} & \irrepsub{2400}{cs} & 2100 & (12) & 0\\
\dynkin{0, 0, 0, 7} & \irrepsub{2640}{s} & 4290 & (10) & 1\\
\dynkin{7, 0, 0, 0} & \irrepsub{2640}{v} & 4290 & (02) & 0\\
\dynkin{0, 0, 7, 0} & \irrepsub{2640}{c} & 4290 & (12) & 0\\
\dynkin{2, 1, 0, 1} & \irrepsub{2800}{vs} & 2150 & (10) & 0\\
\dynkin{0, 1, 2, 1} & \irrepsub{2800}{cs} & 2150 & (10) & 0\\
\dynkin{1, 1, 2, 0} & \irrepsub{2800}{cv} & 2150 & (02) & 0\\
\dynkin{1, 1, 0, 2} & \irrepsub{2800}{sv} & 2150 & (02) & 0\\
\dynkin{2, 1, 1, 0} & \irrepsub{2800}{vc} & 2150 & (12) & 0\\
\dynkin{0, 1, 1, 2} & \irrepsub{2800}{sc} & 2150 & (12) & 0\\
\dynkin{3, 0, 1, 1} & \irrepsub{3675}{v} & 3150 & (00) & 0\\
\dynkin{1, 0, 3, 1} & \irrepsub{3675}{c} & 3150 & (00) & 0\\
\dynkin{1, 0, 1, 3} & \irrepsub{3675}{s} & 3150 & (00) & 0\\
\dynkin{4, 1, 0, 0} & \irrepsub{3696}{v} & 3960 & (00) & 0\\
\dynkin{0, 1, 4, 0} & \irrepsub{3696}{c} & 3960 & (00) & 0\\
\dynkin{0, 1, 0, 4} & \irrepsub{3696}{s} & 3960 & (00) & 0\\
\dynkin{5, 0, 1, 0} & \irrepsub[1]{3696}{vc} & 4422 & (10) & 0\\
\dynkin{1, 0, 5, 0} & \irrepsub[1]{3696}{cv} & 4422 & (10) & 0\\
\dynkin{0, 0, 5, 1} & \irrepsub[1]{3696}{cs} & 4422 & (02) & 0\\
\dynkin{0, 0, 1, 5} & \irrepsub[1]{3696}{sc} & 4422 & (02) & 0\\
\dynkin{5, 0, 0, 1} & \irrepsub[1]{3696}{vs} & 4422 & (12) & 0\\
\dynkin{1, 0, 0, 5} & \irrepsub[1]{3696}{sv} & 4422 & (12) & 0\\
\dynkin{1, 1, 1, 1} & \irrep{4096} & 3072 & (00) & 0\\
\dynkin{2, 2, 0, 0} & \irrepsub{4312}{v} & 4004 & (00) & 0\\
\dynkin{0, 2, 2, 0} & \irrepsub{4312}{c} & 4004 & (00) & 0\\
\dynkin{0, 2, 0, 2} & \irrepsub{4312}{s} & 4004 & (00) & 0\\
\dynkin{2, 0, 2, 1} & \irrepsub{4536}{s} & 3807 & (10) & 0\\
\dynkin{1, 0, 2, 2} & \irrepsub{4536}{v} & 3807 & (02) & 0\\
\dynkin{2, 0, 1, 2} & \irrepsub{4536}{c} & 3807 & (12) & 0\\
\end{longtable}
\newpage
\begin{longtable}{lrrccc}
\caption{\label{tab:SO9Irreps}SO(9) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SO(8) & SU(4)${\times}$SU(2)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets & singlets\\
\midrule
\endfirsthead
\caption[]{SO(9) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SO(8) & SU(4)${\times}$SU(2)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets & singlets\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0, 0} & \irrep{9} & 1 & 0 & 1 & 0\\
\dynkin{0, 0, 0, 1} & \irrep{16} & 2 & 1 & 0 & 0\\
\dynkin{0, 1, 0, 0} & \irrep{36} & 7 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0} & \irrep{44} & 11 & 0 & 1 & 1\\
\dynkin{0, 0, 1, 0} & \irrep{84} & 21 & 0 & 0 & 1\\
\dynkin{0, 0, 0, 2} & \irrep{126} & 35 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 1} & \irrep{128} & 32 & 1 & 0 & 0\\
\dynkin{3, 0, 0, 0} & \irrep{156} & 65 & 0 & 1 & 0\\
\dynkin{1, 1, 0, 0} & \irrep{231} & 77 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 1} & \irrep{432} & 150 & 1 & 0 & 0\\
\dynkin{4, 0, 0, 0} & \irrep{450} & 275 & 0 & 1 & 1\\
\dynkin{0, 2, 0, 0} & \irrep{495} & 220 & 0 & 0 & 1\\
\dynkin{2, 0, 0, 1} & \irrep{576} & 232 & 1 & 0 & 0\\
\dynkin{1, 0, 1, 0} & \irrep{594} & 231 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 3} & \irrep{672} & 308 & 1 & 0 & 0\\
\dynkin{0, 0, 1, 1} & \irrep{768} & 320 & 1 & 0 & 0\\
\dynkin{2, 1, 0, 0} & \irrep{910} & 455 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 2} & \irrep{924} & 385 & 0 & 0 & 0\\
\dynkin{5, 0, 0, 0} & \irrep{1122} & 935 & 0 & 1 & 0\\
\dynkin{0, 1, 1, 0} & \irrep{1650} & 825 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 1} & \irrep{1920} & 1120 & 1 & 0 & 0\\
\dynkin{0, 0, 2, 0} & \irrep{1980} & 1155 & 0 & 0 & 1\\
\dynkin{2, 0, 1, 0} & \irrep{2457} & 1365 & 0 & 0 & 1\\
\dynkin{6, 0, 0, 0} & \irrep{2508} & 2717 & 0 & 1 & 1\\
\dynkin{1, 1, 0, 1} & \irrep{2560} & 1280 & 1 & 0 & 0\\
\dynkin{1, 2, 0, 0} & \irrep{2574} & 1573 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 2} & \irrep{2772} & 1463 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 4} & \irrep[1]{2772} & 1848 & 0 & 0 & 0\\
\dynkin{3, 1, 0, 0} & \irrep[2]{2772} & 1925 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 2} & \irrep{3900} & 2275 & 0 & 0 & 0\\
\dynkin{0, 3, 0, 0} & \irrep{4004} & 3003 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 2} & \irrep{4158} & 2541 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 3} & \irrep{4608} & 2816 & 1 & 0 & 0\\
\dynkin{0, 2, 0, 1} & \irrep{4928} & 3080 & 1 & 0 & 0\\
\dynkin{1, 0, 1, 1} & \irrep{5040} & 2870 & 1 & 0 & 0\\
\dynkin{7, 0, 0, 0} & \irrep{5148} & 7007 & 0 & 1 & 0\\
\dynkin{4, 0, 0, 1} & \irrep{5280} & 4180 & 1 & 0 & 0\\
\dynkin{4, 1, 0, 0} & \irrep{7140} & 6545 & 0 & 0 & 0\\
\dynkin{3, 0, 1, 0} & \irrep{7700} & 5775 & 0 & 0 & 0\\
\dynkin{2, 2, 0, 0} & \irrep{8748} & 7047 & 0 & 0 & 1\\
\dynkin{1, 1, 1, 0} & \irrep{9009} & 6006 & 0 & 0 & 0\\
\dynkin{2, 1, 0, 1} & \irrep{9504} & 6468 & 1 & 0 & 0\\
\dynkin{0, 0, 0, 5} & \irrep[1]{9504} & 8580 & 1 & 0 & 0\\
\dynkin{8, 0, 0, 0} & \irrep{9867} & 16445 & 0 & 1 & 1\\
\dynkin{1, 0, 2, 0} & \irrep{12012} & 9009 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 2} & \irrep{12375} & 9625 & 0 & 0 & 0\\
\dynkin{0, 1, 1, 1} & \irrep{12672} & 8800 & 1 & 0 & 0\\
\dynkin{0, 1, 0, 3} & \irrep[1]{12672} & 9328 & 1 & 0 & 0\\
\dynkin{5, 0, 0, 1} & \irrep[2]{12672} & 13024 & 1 & 0 & 0\\
\dynkin{0, 0, 2, 1} & \irrep{13200} & 10450 & 1 & 0 & 0\\
\dynkin{1, 1, 0, 2} & \irrep{15444} & 10725 & 0 & 0 & 0\\
\dynkin{0, 2, 1, 0} & \irrep[1]{15444} & 12441 & 0 & 0 & 1\\
\dynkin{5, 1, 0, 0} & \irrep{16302} & 19019 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 3} & \irrep{16896} & 14080 & 1 & 0 & 0\\
\dynkin{9, 0, 0, 0} & \irrep{17875} & 35750 & 0 & 1 & 0\\
\dynkin{1, 0, 0, 4} & \irrep{18018} & 15015 & 0 & 0 & 0\\
\dynkin{1, 3, 0, 0} & \irrep[1]{18018} & 17017 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 3} & \irrep{18480} & 14630 & 1 & 0 & 0\\
\dynkin{2, 0, 1, 1} & \irrep{19712} & 14784 & 1 & 0 & 0\\
\dynkin{4, 0, 1, 0} & \irrep{20196} & 19635 & 0 & 0 & 1\\
\dynkin{0, 4, 0, 0} & \irrep{22932} & 25480 & 0 & 0 & 1\\
\dynkin{0, 0, 3, 0} & \irrep{23595} & 23595 & 0 & 0 & 1\\
\dynkin{3, 2, 0, 0} & \irrep{23868} & 24531 & 0 & 0 & 0\\
\dynkin{1, 2, 0, 1} & \irrep{24192} & 19488 & 1 & 0 & 0\\
\dynkin{1, 0, 1, 2} & \irrep{25740} & 20020 & 0 & 0 & 0\\
\dynkin{0, 1, 2, 0} & \irrep{27027} & 24024 & 0 & 0 & 0\\
\dynkin{0, 2, 0, 2} & \irrep{27456} & 22880 & 0 & 0 & 0\\
\dynkin{6, 0, 0, 1} & \irrep[1]{27456} & 35464 & 1 & 0 & 0\\
\dynkin{3, 1, 0, 1} & \irrep{27648} & 24576 & 1 & 0 & 0\\
\dynkin{0, 0, 0, 6} & \irrep{28314} & 33033 & 0 & 0 & 0\\
\dynkin{2, 1, 1, 0} & \irrep{31500} & 27125 & 0 & 0 & 0\\
\dynkin{4, 0, 0, 2} & \irrep{32725} & 32725 & 0 & 0 & 0\\
\dynkin{6, 1, 0, 0} & \irrep{33957} & 49049 & 0 & 0 & 0\\
\dynkin{0, 3, 0, 1} & \irrep{34944} & 33488 & 1 & 0 & 0\\
\dynkin{2, 0, 2, 0} & \irrep{44352} & 41888 & 0 & 0 & 1\\
\dynkin{0, 1, 0, 4} & \irrep{46332} & 45045 & 0 & 0 & 0\\
\dynkin{5, 0, 1, 0} & \irrep{46683} & 57057 & 0 & 0 & 0\\
\dynkin{2, 1, 0, 2} & \irrep{54675} & 48600 & 0 & 0 & 0\\
\dynkin{7, 0, 0, 1} & \irrep{54912} & 86944 & 1 & 0 & 0\\
\dynkin{2, 3, 0, 0} & \irrep{54978} & 64141 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 3} & \irrep{56320} & 56320 & 1 & 0 & 0\\
\dynkin{4, 2, 0, 0} & \irrep{56430} & 72105 & 0 & 0 & 1\\
\dynkin{0, 0, 2, 2} & \irrep{56628} & 58201 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 4} & \irrep[1]{56628} & 61347 & 0 & 0 & 0\\
\dynkin{3, 0, 1, 1} & \irrep{59136} & 56672 & 1 & 0 & 0\\
\dynkin{1, 0, 0, 5} & \irrep[1]{59136} & 64064 & 1 & 0 & 0\\
\dynkin{0, 1, 1, 2} & \irrep{60060} & 55055 & 0 & 0 & 0\\
\dynkin{1, 1, 1, 1} & \irrep{65536} & 57344 & 1 & 0 & 0\\
\dynkin{7, 1, 0, 0} & \irrep{65780} & 115115 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 3} & \irrep{67200} & 61600 & 1 & 0 & 0\\
\dynkin{4, 1, 0, 1} & \irrep{68640} & 77220 & 1 & 0 & 0\\
\dynkin{2, 0, 0, 4} & \irrep{69300} & 71225 & 0 & 0 & 0\\
\dynkin{1, 2, 1, 0} & \irrep{71500} & 71500 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 7} & \irrep{75504} & 110110 & 1 & 0 & 0\\
\dynkin{1, 0, 2, 1} & \irrep{76032} & 73920 & 1 & 0 & 0\\
\dynkin{5, 0, 0, 2} & \irrep{76076} & 95095 & 0 & 0 & 0\\
\dynkin{2, 2, 0, 1} & \irrep{78624} & 79716 & 1 & 0 & 0\\
\dynkin{3, 1, 1, 0} & \irrep{87516} & 94809 & 0 & 0 & 0\\
\end{longtable}
\newpage
{
\setlength{\tabcolsep}{3pt}
\begin{longtable}{lrrccccc}
\caption{\label{tab:SO10Irreps}SO(10) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SU(5) & SU(2)${\times}$SU(2)${\times}$SU(4) & SO(9) & SU(2)${\times}$SO(7)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets & singlets & singlets & singlets\\
\midrule
\endfirsthead
\caption[]{SO(10) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SU(5) & SU(2)${\times}$SU(2)${\times}$SU(4) & SO(9) & SU(2)${\times}$SO(7)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets & singlets & singlets & singlets\\
\midrule
\endhead
\multicolumn{8}{l}{\footnotesize $^\ast$SU(5)${\times}$U(1) singlets.}
\endfoot
\bottomrule
\multicolumn{8}{l}{\footnotesize $^\ast$SU(5)${\times}$U(1) singlets.}
\endlastfoot
\dynkin{1, 0, 0, 0, 0} & \irrep{10} & 1 & (02) & 0 & 0 & 1 & 0\\
\dynkin{0, 0, 0, 0, 1} & \irrep{16} & 2 & (11) & 1 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 0} & \irrep{45} & 8 & (00) & \starred{1} & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 0} & \irrep{54} & 12 & (00) & 0 & 1 & 1 & 1\\
\dynkin{0, 0, 1, 0, 0} & \irrep{120} & 28 & (02) & 0 & 0 & 0 & 1\\
\dynkin{0, 0, 0, 2, 0} & \irrep{126} & 35 & (02) & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 1, 0} & \irrep{144} & 34 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 1, 1} & \irrep{210} & 56 & (00) & \starred{1} & 1 & 0 & 0\\
\dynkin{3, 0, 0, 0, 0} & \irrep[1]{210} & 77 & (02) & 0 & 0 & 1 & 0\\
\dynkin{1, 1, 0, 0, 0} & \irrep{320} & 96 & (02) & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 1} & \irrep{560} & 182 & (11) & 1 & 0 & 0 & 0\\
\dynkin{4, 0, 0, 0, 0} & \irrep{660} & 352 & (00) & 0 & 1 & 1 & 1\\
\dynkin{0, 0, 0, 3, 0} & \irrep{672} & 308 & (11) & 1 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 1} & \irrep{720} & 266 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 2, 0, 0, 0} & \irrep{770} & 308 & (00) & \starred{1} & 1 & 0 & 1\\
\dynkin{1, 0, 1, 0, 0} & \irrep{945} & 336 & (00) & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 2, 0} & \irrep{1050} & 420 & (00) & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 1, 0} & \irrep{1200} & 470 & (11) & 0 & 0 & 0 & 0\\
\dynkin{2, 1, 0, 0, 0} & \irrep{1386} & 616 & (00) & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 1, 2} & \irrep{1440} & 628 & (11) & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 1, 1} & \irrep{1728} & 672 & (02) & 0 & 0 & 0 & 0\\
\dynkin{5, 0, 0, 0, 0} & \irrep{1782} & 1287 & (02) & 0 & 0 & 1 & 0\\
\dynkin{3, 0, 0, 1, 0} & \irrep{2640} & 1386 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 4, 0} & \irrep{2772} & 1848 & (00) & 1 & 0 & 0 & 0\\
\dynkin{0, 1, 1, 0, 0} & \irrep{2970} & 1353 & (02) & 0 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 1, 0} & \irrep{3696} & 1694 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 2, 0} & \irrep[1]{3696} & 1848 & (02) & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 2, 0, 0} & \irrep{4125} & 2200 & (00) & 0 & 1 & 0 & 1\\
\dynkin{6, 0, 0, 0, 0} & \irrep{4290} & 4004 & (00) & 0 & 1 & 1 & 1\\
\dynkin{2, 0, 1, 0, 0} & \irrep{4312} & 2156 & (02) & 0 & 0 & 0 & 1\\
\dynkin{1, 2, 0, 0, 0} & \irrep{4410} & 2401 & (02) & 0 & 0 & 0 & 0\\
\dynkin{3, 1, 0, 0, 0} & \irrep{4608} & 2816 & (02) & 0 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 2, 0} & \irrep{4950} & 2695 & (02) & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 0, 3} & \irrep{5280} & 3124 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 1, 1} & \irrep{5940} & 2904 & (00) & \starred{1} & 0 & 0 & 0\\
\dynkin{0, 0, 1, 2, 0} & \irrep{6930} & 4004 & (00) & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 3, 1} & \irrep[1]{6930} & 4389 & (02) & 1 & 0 & 0 & 0\\
\dynkin{0, 3, 0, 0, 0} & \irrep{7644} & 5096 & (00) & \starred{1} & 0 & 0 & 0\\
\dynkin{4, 0, 0, 0, 1} & \irrep{7920} & 5566 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 2, 0, 0, 1} & \irrep{8064} & 4592 & (11) & 1 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 1, 1} & \irrep{8085} & 4312 & (00) & 0 & 1 & 0 & 0\\
\dynkin{1, 0, 1, 0, 1} & \irrep{8800} & 4620 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 2, 2} & \irrep{8910} & 5544 & (00) & \starred{1} & 1 & 0 & 0\\
\dynkin{7, 0, 0, 0, 0} & \irrep{9438} & 11011 & (02) & 0 & 0 & 1 & 0\\
\dynkin{0, 0, 0, 0, 5} & \irrep{9504} & 8580 & (11) & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 1, 1} & \irrep{10560} & 5984 & (02) & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 2, 1} & \irrep{11088} & 6314 & (11) & 0 & 0 & 0 & 0\\
\dynkin{4, 1, 0, 0, 0} & \irrep{12870} & 10296 & (00) & 0 & 0 & 0 & 0\\
\dynkin{3, 0, 1, 0, 0} & \irrep{14784} & 9856 & (00) & 0 & 0 & 0 & 0\\
\dynkin{2, 1, 0, 0, 1} & \irrep{15120} & 9282 & (11) & 0 & 0 & 0 & 0\\
\dynkin{2, 2, 0, 0, 0} & \irrep{16380} & 11648 & (00) & 0 & 1 & 0 & 1\\
\dynkin{0, 1, 0, 3, 0} & \irrep{17280} & 12144 & (11) & 1 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 2, 0} & \irrep{17325} & 12320 & (00) & 0 & 0 & 0 & 0\\
\dynkin{1, 1, 1, 0, 0} & \irrep{17920} & 10752 & (00) & 0 & 0 & 0 & 0\\
\dynkin{8, 0, 0, 0, 0} & \irrep{19305} & 27456 & (00) & 0 & 1 & 1 & 1\\
\dynkin{5, 0, 0, 1, 0} & \irrep{20592} & 18590 & (11) & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 4, 0} & \irrep{20790} & 16863 & (02) & 0 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 2, 0} & \irrep{23040} & 14848 & (00) & 0 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 3, 0} & \irrep{23760} & 17754 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 1, 1, 0} & \irrep{25200} & 16030 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 4, 1} & \irrep{26400} & 22660 & (11) & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 2, 0, 0} & \irrep{27720} & 18788 & (02) & 0 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 1, 1} & \irrep{28160} & 19712 & (02) & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 6, 0} & \irrep{28314} & 33033 & (02) & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 1, 0, 3} & \irrep{29568} & 23408 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 2, 0, 1} & \irrep{30800} & 22330 & (11) & 0 & 0 & 0 & 0\\
\dynkin{5, 1, 0, 0, 0} & \irrep{31680} & 32032 & (02) & 0 & 0 & 0 & 0\\
\dynkin{0, 2, 1, 0, 0} & \irrep{34398} & 24843 & (02) & 0 & 0 & 0 & 1\\
\dynkin{0, 1, 0, 1, 2} & \irrep{34992} & 23814 & (11) & 1 & 0 & 0 & 0\\
\dynkin{1, 1, 0, 1, 1} & \irrep{36750} & 23275 & (02) & 0 & 0 & 0 & 0\\
\dynkin{9, 0, 0, 0, 0} & \irrep{37180} & 63206 & (02) & 0 & 0 & 1 & 0\\
\dynkin{1, 3, 0, 0, 0} & \irrep{37632} & 31360 & (02) & 0 & 0 & 0 & 0\\
\dynkin{2, 0, 1, 1, 0} & \irrep{38016} & 25872 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 2, 3} & \irrep{39600} & 33110 & (11) & 1 & 0 & 0 & 0\\
\dynkin{4, 0, 1, 0, 0} & \irrep{42120} & 36036 & (02) & 0 & 0 & 0 & 1\\
\dynkin{1, 2, 0, 1, 0} & \irrep{43680} & 31668 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 2, 0, 2, 0} & \irrep{46800} & 35880 & (02) & 1 & 0 & 0 & 0\\
\dynkin{3, 1, 0, 1, 0} & \irrep{48048} & 38038 & (11) & 0 & 0 & 0 & 0\\
\dynkin{6, 0, 0, 0, 1} & \irrep[1]{48048} & 54054 & (11) & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 1, 2, 0} & \irrep{48114} & 34749 & (02) & 0 & 0 & 0 & 0\\
\dynkin{3, 2, 0, 0, 0} & \irrep{48510} & 43659 & (02) & 0 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 1, 2} & \irrep{49280} & 35728 & (11) & 0 & 0 & 0 & 0\\
\dynkin{4, 0, 0, 2, 0} & \irrep{50050} & 45045 & (02) & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 3, 1} & \irrep{50688} & 39424 & (00) & 0 & 0 & 0 & 0\\
\dynkin{0, 4, 0, 0, 0} & \irrep{52920} & 51744 & (00) & \starred{1} & 1 & 0 & 1\\
\dynkin{0, 0, 1, 2, 1} & \irrep{55440} & 42658 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 4, 0} & \irrep{64350} & 60060 & (00) & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 2, 2} & \irrep{64680} & 49588 & (02) & 0 & 0 & 0 & 0\\
\dynkincomma{10, 0, 0, 0, 0} & \irrep{68068} & 136136 & (00) & 0 & 1 & 1 & 1\\
\dynkin{2, 1, 1, 0, 0} & \irrep{68640} & 52624 & (02) & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 5, 0} & \irrep[1]{68640} & 72644 & (11) & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 2, 0, 0} & \irrep{70070} & 56056 & (00) & 0 & 0 & 0 & 0\\
\dynkin{0, 0, 3, 0, 0} & \irrep[1]{70070} & 63063 & (02) & 0 & 0 & 0 & 1\\
\dynkin{0, 3, 0, 0, 1} & \irrep{70560} & 60564 & (11) & 1 & 0 & 0 & 0\\
\dynkin{6, 1, 0, 0, 0} & \irrep{70785} & 88088 & (00) & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 1, 1, 1} & \irrep{72765} & 51744 & (00) & 0 & 0 & 0 & 0\\
\end{longtable}
}
\newpage
\begin{longtable}{lrrc}
\caption{\label{tab:SO11Irreps}SO(11) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{SO(11) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0, 0, 0} & \irrep{11} & 1 & 0\\
\dynkin{0, 0, 0, 0, 1} & \irrep{32} & 4 & 1\\
\dynkin{0, 1, 0, 0, 0} & \irrep{55} & 9 & 0\\
\dynkin{2, 0, 0, 0, 0} & \irrep{65} & 13 & 0\\
\dynkin{0, 0, 1, 0, 0} & \irrep{165} & 36 & 0\\
\dynkin{3, 0, 0, 0, 0} & \irrep{275} & 90 & 0\\
\dynkin{1, 0, 0, 0, 1} & \irrep{320} & 72 & 1\\
\dynkin{0, 0, 0, 1, 0} & \irrep{330} & 84 & 0\\
\dynkin{1, 1, 0, 0, 0} & \irrep{429} & 117 & 0\\
\dynkin{0, 0, 0, 0, 2} & \irrep{462} & 126 & 0\\
\dynkin{4, 0, 0, 0, 0} & \irrep{935} & 442 & 0\\
\dynkin{0, 2, 0, 0, 0} & \irrep{1144} & 416 & 0\\
\dynkin{0, 1, 0, 0, 1} & \irrep{1408} & 432 & 1\\
\dynkin{1, 0, 1, 0, 0} & \irrep{1430} & 468 & 0\\
\dynkin{2, 0, 0, 0, 1} & \irrep{1760} & 604 & 1\\
\dynkin{2, 1, 0, 0, 0} & \irrep{2025} & 810 & 0\\
\dynkin{5, 0, 0, 0, 0} & \irrep{2717} & 1729 & 0\\
\dynkin{1, 0, 0, 1, 0} & \irrep{3003} & 1092 & 0\\
\dynkin{0, 0, 1, 0, 1} & \irrep{3520} & 1304 & 1\\
\dynkin{0, 0, 0, 0, 3} & \irrep{4224} & 1872 & 1\\
\dynkin{1, 0, 0, 0, 2} & \irrep{4290} & 1638 & 0\\
\dynkin{0, 1, 1, 0, 0} & \irrep{5005} & 2093 & 0\\
\dynkin{0, 0, 0, 1, 1} & \irrep{5280} & 2196 & 1\\
\dynkin{6, 0, 0, 0, 0} & \irrep{7007} & 5733 & 0\\
\dynkin{3, 0, 0, 0, 1} & \irrep{7040} & 3376 & 1\\
\dynkin{2, 0, 1, 0, 0} & \irrep{7128} & 3240 & 0\\
\dynkin{1, 2, 0, 0, 0} & \irrep{7150} & 3510 & 0\\
\dynkin{3, 1, 0, 0, 0} & \irrep{7293} & 3978 & 0\\
\dynkin{0, 0, 2, 0, 0} & \irrep{7865} & 3861 & 0\\
\dynkin{1, 1, 0, 0, 1} & \irrep{10240} & 4352 & 1\\
\dynkin{0, 1, 0, 1, 0} & \irrep{11583} & 5265 & 0\\
\dynkin{0, 3, 0, 0, 0} & \irrep{13650} & 8190 & 0\\
\dynkin{2, 0, 0, 1, 0} & \irrep{15400} & 7560 & 0\\
\dynkin{7, 0, 0, 0, 0} & \irrep{16445} & 16744 & 0\\
\dynkin{0, 1, 0, 0, 2} & \irrep{17160} & 8112 & 0\\
\dynkin{4, 1, 0, 0, 0} & \irrep{21945} & 15561 & 0\\
\dynkin{2, 0, 0, 0, 2} & \irrep{22275} & 11340 & 0\\
\dynkin{4, 0, 0, 0, 1} & \irrep{22880} & 14508 & 1\\
\dynkin{0, 0, 1, 1, 0} & \irrep{23595} & 12441 & 0\\
\dynkin{0, 0, 0, 2, 0} & \irrep[1]{23595} & 13728 & 0\\
\dynkin{0, 2, 0, 0, 1} & \irrep{24960} & 13104 & 1\\
\dynkin{3, 0, 1, 0, 0} & \irrep{26520} & 15912 & 0\\
\dynkin{0, 0, 0, 0, 4} & \irrep{28314} & 18018 & 0\\
\dynkin{1, 0, 1, 0, 1} & \irrep{28512} & 13932 & 1\\
\dynkin{2, 2, 0, 0, 0} & \irrep{28798} & 18326 & 0\\
\dynkin{1, 1, 1, 0, 0} & \irrep{33033} & 18018 & 0\\
\dynkin{8, 0, 0, 0, 0} & \irrep{35750} & 44200 & 0\\
\dynkin{1, 0, 0, 0, 3} & \irrep{36960} & 20748 & 1\\
\dynkin{0, 0, 1, 0, 2} & \irrep{37752} & 20592 & 0\\
\end{longtable}
\newpage
\begin{longtable}{lrrc}
\caption{\label{tab:SO12Irreps}SO(12) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{SO(12) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0} & \irrep{12} & 1 & (02)\\
\dynkin{0, 0, 0, 0, 1, 0} & \irrep{32} & 4 & (10)\\
\dynkin{0, 1, 0, 0, 0, 0} & \irrep{66} & 10 & (00)\\
\dynkin{2, 0, 0, 0, 0, 0} & \irrep{77} & 14 & (00)\\
\dynkin{0, 0, 1, 0, 0, 0} & \irrep{220} & 45 & (02)\\
\dynkin{1, 0, 0, 0, 0, 1} & \irrep{352} & 76 & (10)\\
\dynkin{3, 0, 0, 0, 0, 0} & \irrep[1]{352} & 104 & (02)\\
\dynkin{0, 0, 0, 0, 2, 0} & \irrep{462} & 126 & (00)\\
\dynkin{0, 0, 0, 1, 0, 0} & \irrep{495} & 120 & (00)\\
\dynkin{1, 1, 0, 0, 0, 0} & \irrep{560} & 140 & (02)\\
\dynkin{0, 0, 0, 0, 1, 1} & \irrep{792} & 210 & (02)\\
\dynkin{4, 0, 0, 0, 0, 0} & \irrep{1287} & 546 & (00)\\
\dynkin{0, 2, 0, 0, 0, 0} & \irrep{1638} & 546 & (00)\\
\dynkin{0, 1, 0, 0, 1, 0} & \irrep{1728} & 504 & (10)\\
\dynkin{1, 0, 1, 0, 0, 0} & \irrep{2079} & 630 & (00)\\
\dynkin{2, 0, 0, 0, 1, 0} & \irrep{2112} & 680 & (10)\\
\dynkin{2, 1, 0, 0, 0, 0} & \irrep{2860} & 1040 & (00)\\
\dynkin{5, 0, 0, 0, 0, 0} & \irrep{4004} & 2275 & (02)\\
\dynkin{0, 0, 0, 0, 3, 0} & \irrep{4224} & 1872 & (10)\\
\dynkin{1, 0, 0, 0, 2, 0} & \irrep{4752} & 1764 & (02)\\
\dynkin{1, 0, 0, 1, 0, 0} & \irrep{4928} & 1680 & (02)\\
\dynkin{0, 0, 1, 0, 0, 1} & \irrep[1]{4928} & 1736 & (10)\\
\dynkin{0, 1, 1, 0, 0, 0} & \irrep{8008} & 3094 & (02)\\
\dynkin{1, 0, 0, 0, 1, 1} & \irrep{8085} & 2940 & (00)\\
\dynkin{0, 0, 0, 1, 1, 0} & \irrep{8800} & 3500 & (10)\\
\dynkin{3, 0, 0, 0, 0, 1} & \irrep{9152} & 4056 & (10)\\
\dynkin{0, 0, 0, 0, 1, 2} & \irrep{9504} & 4068 & (10)\\
\dynkin{6, 0, 0, 0, 0, 0} & \irrep{11011} & 8008 & (00)\\
\dynkin{1, 2, 0, 0, 0, 0} & \irrep{11088} & 4956 & (02)\\
\dynkin{3, 1, 0, 0, 0, 0} & \irrep[1]{11088} & 5460 & (02)\\
\dynkin{2, 0, 1, 0, 0, 0} & \irrep{11232} & 4680 & (02)\\
\dynkin{1, 1, 0, 0, 0, 1} & \irrep{13728} & 5460 & (10)\\
\dynkin{0, 0, 2, 0, 0, 0} & \irrep{14014} & 6370 & (00)\\
\dynkin{0, 1, 0, 1, 0, 0} & \irrep{21021} & 8918 & (00)\\
\dynkin{0, 1, 0, 0, 2, 0} & \irrep{21450} & 9750 & (00)\\
\dynkin{0, 3, 0, 0, 0, 0} & \irrep{23100} & 12600 & (00)\\
\dynkin{2, 0, 0, 0, 2, 0} & \irrep{27027} & 13104 & (00)\\
\dynkin{2, 0, 0, 1, 0, 0} & \irrep{27456} & 12480 & (00)\\
\dynkin{7, 0, 0, 0, 0, 0} & \irrep[1]{27456} & 24752 & (02)\\
\dynkin{0, 0, 0, 0, 4, 0} & \irrep{28314} & 18018 & (00)\\
\dynkin{4, 0, 0, 0, 1, 0} & \irrep{32032} & 18564 & (10)\\
\dynkin{4, 1, 0, 0, 0, 0} & \irrep{35750} & 22750 & (00)\\
\dynkin{0, 1, 0, 0, 1, 1} & \irrep{36036} & 16107 & (02)\\
\dynkin{0, 2, 0, 0, 1, 0} & \irrep{36960} & 18060 & (10)\\
\dynkin{1, 0, 0, 0, 0, 3} & \irrep{41184} & 22620 & (10)\\
\dynkin{1, 0, 1, 0, 1, 0} & \irrep{43680} & 20020 & (10)\\
\dynkin{3, 0, 1, 0, 0, 0} & \irrep{45045} & 24570 & (00)\\
\dynkin{2, 0, 0, 0, 1, 1} & \irrep{45760} & 21840 & (02)\\
\dynkin{0, 0, 1, 1, 0, 0} & \irrep{48048} & 23660 & (02)\\
\end{longtable}
\newpage
\begin{longtable}{lrrc}
\caption{\label{tab:SO13Irreps}SO(13) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{SO(13) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0} & \irrep{13} & 1 & 0\\
\dynkin{0, 0, 0, 0, 0, 1} & \irrep{64} & 8 & 1\\
\dynkin{0, 1, 0, 0, 0, 0} & \irrep{78} & 11 & 0\\
\dynkin{2, 0, 0, 0, 0, 0} & \irrep{90} & 15 & 0\\
\dynkin{0, 0, 1, 0, 0, 0} & \irrep{286} & 55 & 0\\
\dynkin{3, 0, 0, 0, 0, 0} & \irrep{442} & 119 & 0\\
\dynkin{1, 1, 0, 0, 0, 0} & \irrep{715} & 165 & 0\\
\dynkin{1, 0, 0, 0, 0, 1} & \irrep{768} & 160 & 1\\
\dynkin{0, 0, 0, 0, 1, 0} & \irrep{1287} & 330 & 0\\
\dynkin{0, 0, 0, 0, 0, 2} & \irrep{1716} & 462 & 0\\
\dynkin{4, 0, 0, 0, 0, 0} & \irrep{1729} & 665 & 0\\
\dynkin{0, 2, 0, 0, 0, 0} & \irrep{2275} & 700 & 0\\
\dynkin{1, 0, 1, 0, 0, 0} & \irrep{2925} & 825 & 0\\
\dynkin{2, 1, 0, 0, 0, 0} & \irrep{3927} & 1309 & 0\\
\dynkin{0, 1, 0, 0, 0, 1} & \irrep{4160} & 1160 & 1\\
\dynkin{2, 0, 0, 0, 0, 1} & \irrep{4992} & 1520 & 1\\
\dynkin{5, 0, 0, 0, 0, 0} & \irrep{5733} & 2940 & 0\\
\dynkin{1, 0, 0, 1, 0, 0} & \irrep{7722} & 2475 & 0\\
\dynkin{0, 1, 1, 0, 0, 0} & \irrep{12285} & 4410 & 0\\
\dynkin{0, 0, 1, 0, 0, 1} & \irrep{13312} & 4480 & 1\\
\dynkin{1, 0, 0, 0, 1, 0} & \irrep{14300} & 4950 & 0\\
\dynkin{3, 1, 0, 0, 0, 0} & \irrep{16302} & 7315 & 0\\
\dynkin{1, 2, 0, 0, 0, 0} & \irrep{16575} & 6800 & 0\\
\dynkin{6, 0, 0, 0, 0, 0} & \irrep{16744} & 10948 & 0\\
\dynkin{2, 0, 1, 0, 0, 0} & \irrep{17017} & 6545 & 0\\
\end{longtable}
\vspace{-10pt}
\begin{longtable}{lrrcc}
\caption{\label{tab:SO14Irreps}SO(14) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SU(2)${\times}$SU(2)${\times}$SO(10)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets\\
\midrule
\endfirsthead
\caption[]{SO(14) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SU(2)${\times}$SU(2)${\times}$SO(10)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0, 0} & \irrep{14} & 1 & (02) & 0\\
\dynkin{0, 0, 0, 0, 0, 1, 0} & \irrep{64} & 8 & (11) & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0} & \irrep{91} & 12 & (00) & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0} & \irrep{104} & 16 & (00) & 1\\
\dynkin{0, 0, 1, 0, 0, 0, 0} & \irrep{364} & 66 & (02) & 0\\
\dynkin{3, 0, 0, 0, 0, 0, 0} & \irrep{546} & 135 & (02) & 0\\
\dynkin{1, 0, 0, 0, 0, 0, 1} & \irrep{832} & 168 & (11) & 0\\
\dynkin{1, 1, 0, 0, 0, 0, 0} & \irrep{896} & 192 & (02) & 0\\
\dynkin{0, 0, 0, 1, 0, 0, 0} & \irrep{1001} & 220 & (00) & 1\\
\dynkin{0, 0, 0, 0, 0, 2, 0} & \irrep{1716} & 462 & (02) & 0\\
\dynkin{0, 0, 0, 0, 1, 0, 0} & \irrep{2002} & 495 & (02) & 0\\
\dynkin{4, 0, 0, 0, 0, 0, 0} & \irrep{2275} & 800 & (00) & 1\\
\dynkin{0, 0, 0, 0, 0, 1, 1} & \irrep{3003} & 792 & (00) & 0\\
\dynkin{0, 2, 0, 0, 0, 0, 0} & \irrep{3080} & 880 & (00) & 1\\
\dynkin{1, 0, 1, 0, 0, 0, 0} & \irrep{4004} & 1056 & (00) & 0\\
\dynkin{0, 1, 0, 0, 0, 1, 0} & \irrep{4928} & 1320 & (11) & 0\\
\dynkin{2, 1, 0, 0, 0, 0, 0} & \irrep{5265} & 1620 & (00) & 0\\
\dynkin{2, 0, 0, 0, 0, 1, 0} & \irrep{5824} & 1688 & (11) & 0\\
\end{longtable}
\newpage
\begin{longtable}{lrrcc}
\caption{\label{tab:SO18Irreps}SO(18) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SO(8)${\times}$SO(10)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets\\
\midrule
\endfirsthead
\caption[]{SO(18) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SO(8)${\times}$SO(10)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{18} & 1 & (02) & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 0, 0} & \irrep{153} & 16 & (00) & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{170} & 20 & (00) & 1\\
\dynkin{0, 0, 0, 0, 0, 0, 0, 0, 1} & \irrep{256} & 32 & (11) & 0\\
\dynkin{0, 0, 1, 0, 0, 0, 0, 0, 0} & \irrep{816} & 120 & (02) & 0\\
\dynkin{3, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{1122} & 209 & (02) & 0\\
\dynkin{1, 1, 0, 0, 0, 0, 0, 0, 0} & \irrep{1920} & 320 & (02) & 0\\
\dynkin{0, 0, 0, 1, 0, 0, 0, 0, 0} & \irrep{3060} & 560 & (00) & 0\\
\dynkin{1, 0, 0, 0, 0, 0, 0, 1, 0} & \irrep{4352} & 800 & (11) & 0\\
\end{longtable}
\begin{longtable}{lrrcc}
\caption{\label{tab:SO22Irreps}SO(22) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SO(12)${\times}$SO(10)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets\\
\midrule
\endfirsthead
\caption[]{SO(22) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SO(12)${\times}$SO(10)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{22} & 1 & (02) & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{231} & 20 & (00) & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{252} & 24 & (00) & 1\\
\dynkin{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0} & \irrep{1024} & 128 & (11) & 0\\
\dynkin{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{1540} & 190 & (02) & 0\\
\dynkin{1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{3520} & 480 & (02) & 0\\
\dynkin{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0} & \irrep{7315} & 1140 & (00) & 0\\
\end{longtable}
\begin{longtable}{lrrcc}
\caption{\label{tab:SO26Irreps}SO(26) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SO(16)${\times}$SO(10)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets\\
\midrule
\endfirsthead
\caption[]{SO(26) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency & SO(16)${\times}$SO(10)\\
\rowcolor{tableheadcolor}label & (name) & (index) & class & singlets\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{26} & 1 & (02) & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{325} & 24 & (00) & 0\\
\dynkin{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{2600} & 276 & (02) & 0\\
\dynkin{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1} & \irrep{4096} & 512 & (11) & 0\\
\dynkin{1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{5824} & 672 & (02) & 0\\
\dynkin{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{14950} & 2024 & (00) & 0\\
\dynkin{1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{52325} & 7728 & (00) & 0\\
\dynkin{0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{65780} & 10626 & (02) & 0\\
\dynkin{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0} & \irrep{102400} & 16896 & (11) & 0\\
\dynkin{0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0} & \irrep{230230} & 42504 & (00) & 0\\
\dynkin{1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{320320} & 56672 & (02) & 0\\
\dynkin{0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} & \irrep{450450} & 83853 & (02) & 0\\
\dynkin{0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0} & \irrep{657800} & 134596 & (02) & 0\\
\end{longtable}
\newpage
\subsubsection{\Sp{N}}
\begin{longtable}{lrrc}
\caption{\label{tab:Sp4Irreps}Sp(4) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{Sp(4) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0} & \irrep{4} & 1 & 1\\
\dynkin{0, 1} & \irrep{5} & 2 & 0\\
\dynkin{2, 0} & \irrep{10} & 6 & 0\\
\dynkin{0, 2} & \irrep{14} & 14 & 0\\
\dynkin{1, 1} & \irrep{16} & 12 & 1\\
\dynkin{3, 0} & \irrep{20} & 21 & 1\\
\dynkin{0, 3} & \irrep{30} & 54 & 0\\
\dynkin{2, 1} & \irrep{35} & 42 & 0\\
\dynkin{4, 0} & \irrep[1]{35} & 56 & 0\\
\dynkin{1, 2} & \irrep{40} & 58 & 1\\
\dynkin{0, 4} & \irrep{55} & 154 & 0\\
\dynkin{5, 0} & \irrep{56} & 126 & 1\\
\dynkin{3, 1} & \irrep{64} & 112 & 1\\
\dynkin{1, 3} & \irrep{80} & 188 & 1\\
\dynkin{2, 2} & \irrep{81} & 162 & 0\\
\dynkin{6, 0} & \irrep{84} & 252 & 0\\
\dynkin{0, 5} & \irrep{91} & 364 & 0\\
\dynkin{4, 1} & \irrep{105} & 252 & 0\\
\dynkin{7, 0} & \irrep{120} & 462 & 1\\
\dynkin{3, 2} & \irrep{140} & 371 & 1\\
\dynkin{1, 4} & \irrep[1]{140} & 483 & 1\\
\dynkin{0, 6} & \irrep[2]{140} & 756 & 0\\
\dynkin{2, 3} & \irrep{154} & 462 & 0\\
\dynkin{5, 1} & \irrep{160} & 504 & 1\\
\dynkin{8, 0} & \irrep{165} & 792 & 0\\
\dynkin{0, 7} & \irrep{204} & 1428 & 0\\
\dynkin{4, 2} & \irrep{220} & 748 & 0\\
\dynkin{9, 0} & \irrep[1]{220} & 1287 & 1\\
\dynkin{1, 5} & \irrep{224} & 1064 & 1\\
\dynkin{6, 1} & \irrep{231} & 924 & 0\\
\dynkin{3, 3} & \irrep{256} & 960 & 1\\
\dynkin{2, 4} & \irrep{260} & 1092 & 0\\
\dynkin{0, 8} & \irrep{285} & 2508 & 0\\
\dynkincomma{10, 0} & \irrep{286} & 2002 & 0\\
\dynkin{7, 1} & \irrep{320} & 1584 & 1\\
\dynkin{5, 2} & \irrep{324} & 1377 & 1\\
\dynkin{1, 6} & \irrep{336} & 2100 & 1\\
\dynkincomma{11, 0} & \irrep{364} & 3003 & 1\\
\dynkin{0, 9} & \irrep{385} & 4158 & 0\\
\dynkin{4, 3} & \irrep{390} & 1794 & 0\\
\dynkin{2, 5} & \irrep{405} & 2268 & 0\\
\dynkin{3, 4} & \irrep{420} & 2121 & 1\\
\dynkin{8, 1} & \irrep{429} & 2574 & 0\\
\dynkin{6, 2} & \irrep{455} & 2366 & 0\\
\dynkincomma{12, 0} & \irrep[1]{455} & 4368 & 0\\
\dynkin{1, 7} & \irrep{480} & 3816 & 1\\
\dynkincomma{0, 10} & \irrep{506} & 6578 & 0\\
\dynkin{5, 3} & \irrep{560} & 3108 & 1\\
\dynkin{9, 1} & \irrep[1]{560} & 4004 & 1\\
\dynkincomma{13, 0} & \irrep[2]{560} & 6188 & 1\\
\dynkin{2, 6} & \irrep{595} & 4284 & 0\\
\dynkin{7, 2} & \irrep{616} & 3850 & 1\\
\dynkin{4, 4} & \irrep{625} & 3750 & 0\\
\dynkin{3, 5} & \irrep{640} & 4192 & 1\\
\dynkincomma{0, 11} & \irrep{650} & 10010 & 0\\
\dynkin{1, 8} & \irrep{660} & 6501 & 1\\
\dynkincomma{14, 0} & \irrep{680} & 8568 & 0\\
\dynkincomma{10, 1} & \irrep{715} & 6006 & 0\\
\dynkin{6, 3} & \irrep{770} & 5082 & 0\\
\dynkin{8, 2} & \irrep{810} & 5994 & 0\\
\dynkincomma{15, 0} & \irrep{816} & 11628 & 1\\
\dynkincomma{0, 12} & \irrep{819} & 14742 & 0\\
\dynkin{2, 7} & \irrep{836} & 7524 & 0\\
\dynkin{5, 4} & \irrep{880} & 6204 & 1\\
\dynkin{1, 9} & \irrep[1]{880} & 10516 & 1\\
\dynkincomma{11, 1} & \irrep{896} & 8736 & 1\\
\dynkin{3, 6} & \irrep{924} & 7623 & 1\\
\dynkin{4, 5} & \irrep{935} & 7106 & 0\\
\dynkincomma{16, 0} & \irrep{969} & 15504 & 0\\
\dynkincomma{0, 13} & \irrep{1015} & 21112 & 0\\
\dynkin{7, 3} & \irrep{1024} & 7936 & 1\\
\dynkin{9, 2} & \irrep{1040} & 8996 & 1\\
\dynkincomma{12, 1} & \irrep{1105} & 12376 & 0\\
\dynkin{2, 8} & \irrep{1134} & 12474 & 0\\
\dynkincomma{17, 0} & \irrep{1140} & 20349 & 1\\
\dynkincomma{1, 10} & \irrep{1144} & 16302 & 1\\
\dynkin{6, 4} & \irrep{1190} & 9758 & 0\\
\dynkincomma{0, 14} & \irrep{1240} & 29512 & 0\\
\dynkin{3, 7} & \irrep{1280} & 12992 & 1\\
\dynkin{5, 5} & \irrep{1296} & 11340 & 1\\
\dynkincomma{10, 2} & \irrep{1309} & 13090 & 0\\
\dynkin{8, 3} & \irrep{1326} & 11934 & 0\\
\dynkin{4, 6} & \irrep{1330} & 12502 & 0\\
\dynkincomma{18, 0} & \irrep[1]{1330} & 26334 & 0\\
\dynkincomma{13, 1} & \irrep{1344} & 17136 & 1\\
\dynkincomma{1, 11} & \irrep{1456} & 24388 & 1\\
\dynkin{2, 9} & \irrep{1495} & 19734 & 0\\
\dynkincomma{0, 15} & \irrep{1496} & 40392 & 0\\
\dynkincomma{19, 0} & \irrep{1540} & 33649 & 1\\
\dynkin{7, 4} & \irrep{1560} & 14742 & 1\\
\dynkincomma{14, 1} & \irrep{1615} & 23256 & 0\\
\dynkincomma{11, 2} & \irrep{1620} & 18549 & 1\\
\dynkin{9, 3} & \irrep{1680} & 17388 & 1\\
\dynkin{3, 8} & \irrep{1716} & 21021 & 1\\
\dynkin{6, 5} & \irrep{1729} & 17290 & 0\\
\dynkincomma{20, 0} & \irrep{1771} & 42504 & 0\\
\dynkincomma{0, 16} & \irrep{1785} & 54264 & 0\\
\end{longtable}
\newpage
\begin{longtable}{lrrc}
\caption{\label{tab:Sp6Irreps}Sp(6) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{Sp(6) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0} & \irrep{6} & 1 & 1\\
\dynkin{0, 1, 0} & \irrep{14} & 4 & 0\\
\dynkin{0, 0, 1} & \irrep[1]{14} & 5 & 1\\
\dynkin{2, 0, 0} & \irrep{21} & 8 & 0\\
\dynkin{3, 0, 0} & \irrep{56} & 36 & 1\\
\dynkin{1, 1, 0} & \irrep{64} & 32 & 1\\
\dynkin{1, 0, 1} & \irrep{70} & 40 & 0\\
\dynkin{0, 0, 2} & \irrep{84} & 72 & 0\\
\dynkin{0, 2, 0} & \irrep{90} & 60 & 0\\
\dynkin{0, 1, 1} & \irrep{126} & 93 & 1\\
\dynkin{4, 0, 0} & \irrep[1]{126} & 120 & 0\\
\dynkin{2, 1, 0} & \irrep{189} & 144 & 0\\
\dynkin{2, 0, 1} & \irrep{216} & 180 & 1\\
\dynkin{5, 0, 0} & \irrep{252} & 330 & 1\\
\dynkin{0, 0, 3} & \irrep{330} & 495 & 1\\
\dynkin{1, 2, 0} & \irrep{350} & 325 & 1\\
\dynkin{1, 0, 2} & \irrep{378} & 423 & 1\\
\dynkin{0, 3, 0} & \irrep{385} & 440 & 0\\
\dynkin{3, 1, 0} & \irrep{448} & 480 & 1\\
\dynkin{6, 0, 0} & \irrep{462} & 792 & 0\\
\dynkin{1, 1, 1} & \irrep{512} & 512 & 0\\
\dynkin{3, 0, 1} & \irrep{525} & 600 & 0\\
\dynkin{0, 1, 2} & \irrep{594} & 792 & 0\\
\dynkin{0, 2, 1} & \irrep{616} & 748 & 1\\
\dynkin{7, 0, 0} & \irrep{792} & 1716 & 1\\
\dynkin{2, 2, 0} & \irrep{924} & 1144 & 0\\
\dynkin{4, 1, 0} & \irrep[1]{924} & 1320 & 0\\
\dynkin{0, 0, 4} & \irrep{1001} & 2288 & 0\\
\dynkin{2, 0, 2} & \irrep{1078} & 1540 & 0\\
\dynkin{4, 0, 1} & \irrep{1100} & 1650 & 1\\
\dynkin{0, 4, 0} & \irrep{1274} & 2184 & 0\\
\dynkin{8, 0, 0} & \irrep{1287} & 3432 & 0\\
\dynkin{1, 3, 0} & \irrep{1344} & 1952 & 1\\
\dynkin{2, 1, 1} & \irrep{1386} & 1815 & 1\\
\dynkin{1, 0, 3} & \irrep[1]{1386} & 2508 & 0\\
\dynkin{5, 1, 0} & \irrep{1728} & 3168 & 1\\
\dynkin{0, 1, 3} & \irrep{2002} & 4147 & 1\\
\dynkin{9, 0, 0} & \irrep[1]{2002} & 6435 & 1\\
\dynkin{3, 2, 0} & \irrep{2016} & 3216 & 1\\
\dynkin{5, 0, 1} & \irrep{2079} & 3960 & 0\\
\dynkin{0, 3, 1} & \irrep{2184} & 3900 & 1\\
\dynkin{1, 2, 1} & \irrep{2205} & 3360 & 0\\
\dynkin{1, 1, 2} & \irrep{2240} & 3680 & 1\\
\dynkin{0, 2, 2} & \irrep{2457} & 4680 & 0\\
\dynkin{3, 0, 2} & \irrep{2464} & 4400 & 1\\
\dynkin{0, 0, 5} & \irrep{2548} & 8190 & 1\\
\dynkin{6, 1, 0} & \irrep{3003} & 6864 & 0\\
\dynkincomma{10, 0, 0} & \irrep[1]{3003} & 11440 & 0\\
\dynkin{3, 1, 1} & \irrep{3072} & 5120 & 0\\
\dynkin{2, 3, 0} & \irrep{3276} & 5928 & 0\\
\dynkin{0, 5, 0} & \irrep{3528} & 8400 & 0\\
\dynkin{6, 0, 1} & \irrep{3640} & 8580 & 1\\
\dynkin{2, 0, 3} & \irrep{3744} & 8112 & 1\\
\dynkin{4, 2, 0} & \irrep{3900} & 7800 & 0\\
\dynkin{1, 0, 4} & \irrep{4004} & 10582 & 1\\
\dynkin{1, 4, 0} & \irrep{4116} & 8526 & 1\\
\dynkincomma{11, 0, 0} & \irrep{4368} & 19448 & 1\\
\dynkin{4, 0, 2} & \irrep{4914} & 10764 & 0\\
\dynkin{7, 1, 0} & \irrep{4928} & 13728 & 1\\
\dynkin{2, 2, 1} & \irrep{5460} & 10270 & 1\\
\dynkin{0, 1, 4} & \irrep[1]{5460} & 16120 & 0\\
\dynkin{0, 0, 6} & \irrep{5712} & 24480 & 0\\
\dynkin{2, 1, 2} & \irrep{5720} & 11440 & 0\\
\dynkin{4, 1, 1} & \irrep{6006} & 12441 & 1\\
\dynkin{7, 0, 1} & \irrep[1]{6006} & 17160 & 0\\
\dynkincomma{12, 0, 0} & \irrep{6188} & 31824 & 0\\
\dynkin{0, 4, 1} & \irrep{6300} & 15450 & 1\\
\dynkin{3, 3, 0} & \irrep{6720} & 14880 & 1\\
\dynkin{5, 2, 0} & \irrep{6930} & 16995 & 1\\
\dynkin{1, 3, 1} & \irrep{7168} & 15360 & 0\\
\dynkin{1, 1, 3} & \irrep[1]{7168} & 17408 & 0\\
\dynkin{0, 2, 3} & \irrep{7392} & 20240 & 1\\
\dynkin{0, 3, 2} & \irrep{7700} & 19800 & 0\\
\dynkin{8, 1, 0} & \irrep{7722} & 25740 & 0\\
\dynkin{3, 0, 3} & \irrep{8190} & 21060 & 0\\
\dynkin{1, 2, 2} & \irrep{8316} & 18810 & 1\\
\dynkin{0, 6, 0} & \irrep{8568} & 26928 & 0\\
\dynkincomma{13, 0, 0} & \irrep[1]{8568} & 50388 & 1\\
\dynkin{5, 0, 2} & \irrep{8918} & 23569 & 1\\
\dynkin{2, 4, 0} & \irrep{9450} & 23400 & 0\\
\dynkin{8, 0, 1} & \irrep[1]{9450} & 32175 & 1\\
\dynkin{1, 0, 5} & \irrep{9828} & 35568 & 0\\
\dynkin{2, 0, 4} & \irrep{10395} & 31680 & 0\\
\dynkin{5, 1, 1} & \irrep{10752} & 27136 & 0\\
\dynkin{1, 5, 0} & \irrep[1]{10752} & 29952 & 1\\
\dynkin{3, 2, 1} & \irrep{11319} & 25872 & 0\\
\dynkin{6, 2, 0} & \irrep{11550} & 34100 & 0\\
\dynkin{0, 0, 7} & \irrep{11628} & 63954 & 1\\
\dynkincomma{14, 0, 0} & \irrep[1]{11628} & 77520 & 0\\
\dynkin{9, 1, 0} & \irrep{11648} & 45760 & 1\\
\dynkin{3, 1, 2} & \irrep{12096} & 29088 & 1\\
\dynkin{4, 3, 0} & \irrep{12375} & 33000 & 0\\
\dynkin{0, 1, 5} & \irrep{12852} & 51102 & 1\\
\dynkin{9, 0, 1} & \irrep{14300} & 57200 & 0\\
\dynkin{6, 0, 2} & \irrep{15092} & 47432 & 0\\
\dynkincomma{15, 0, 0} & \irrep{15504} & 116280 & 1\\
\dynkin{0, 5, 1} & \irrep{15708} & 50490 & 1\\
\dynkin{4, 0, 3} & \irrep{15750} & 47625 & 1\\
\end{longtable}
\newpage
\begin{longtable}{lrrc}
\caption{\label{tab:Sp8Irreps}Sp(8) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{Sp(8) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0, 0} & \irrep{8} & 1 & 1\\
\dynkin{0, 1, 0, 0} & \irrep{27} & 6 & 0\\
\dynkin{2, 0, 0, 0} & \irrep{36} & 10 & 0\\
\dynkin{0, 0, 0, 1} & \irrep{42} & 14 & 0\\
\dynkin{0, 0, 1, 0} & \irrep{48} & 14 & 1\\
\dynkin{3, 0, 0, 0} & \irrep{120} & 55 & 1\\
\dynkin{1, 1, 0, 0} & \irrep{160} & 60 & 1\\
\dynkin{1, 0, 0, 1} & \irrep{288} & 140 & 1\\
\dynkin{0, 2, 0, 0} & \irrep{308} & 154 & 0\\
\dynkin{1, 0, 1, 0} & \irrep{315} & 140 & 0\\
\dynkin{4, 0, 0, 0} & \irrep{330} & 220 & 0\\
\dynkin{2, 1, 0, 0} & \irrep{594} & 330 & 0\\
\dynkin{0, 0, 0, 2} & \irrep[1]{594} & 462 & 0\\
\dynkin{0, 1, 1, 0} & \irrep{792} & 451 & 1\\
\dynkin{0, 1, 0, 1} & \irrep[1]{792} & 484 & 0\\
\dynkin{5, 0, 0, 0} & \irrep[2]{792} & 715 & 1\\
\dynkin{0, 0, 2, 0} & \irrep{825} & 550 & 0\\
\dynkin{0, 0, 1, 1} & \irrep{1056} & 748 & 1\\
\dynkin{2, 0, 0, 1} & \irrep{1155} & 770 & 0\\
\dynkin{2, 0, 1, 0} & \irrep{1232} & 770 & 1\\
\dynkin{1, 2, 0, 0} & \irrep{1512} & 1029 & 1\\
\dynkin{6, 0, 0, 0} & \irrep{1716} & 2002 & 0\\
\dynkin{3, 1, 0, 0} & \irrep{1728} & 1320 & 1\\
\dynkin{0, 3, 0, 0} & \irrep{2184} & 1820 & 0\\
\dynkin{7, 0, 0, 0} & \irrep{3432} & 5005 & 1\\
\dynkin{3, 0, 0, 1} & \irrep{3520} & 3080 & 1\\
\dynkin{3, 0, 1, 0} & \irrep{3696} & 3080 & 0\\
\dynkin{1, 0, 0, 2} & \irrep[1]{3696} & 3542 & 1\\
\dynkin{1, 1, 1, 0} & \irrep{4096} & 3072 & 0\\
\dynkin{1, 1, 0, 1} & \irrep{4200} & 3325 & 1\\
\dynkin{4, 1, 0, 0} & \irrep{4290} & 4290 & 0\\
\dynkin{0, 0, 0, 3} & \irrep{4719} & 6292 & 0\\
\dynkin{1, 0, 2, 0} & \irrep{4752} & 4026 & 1\\
\dynkin{2, 2, 0, 0} & \irrep{4914} & 4368 & 0\\
\dynkin{1, 0, 1, 1} & \irrep{6237} & 5544 & 0\\
\dynkin{8, 0, 0, 0} & \irrep{6435} & 11440 & 0\\
\dynkin{0, 2, 1, 0} & \irrep{6552} & 5915 & 1\\
\dynkin{0, 2, 0, 1} & \irrep{7020} & 6630 & 0\\
\dynkin{0, 0, 3, 0} & \irrep{8008} & 9009 & 1\\
\dynkin{4, 0, 0, 1} & \irrep{9009} & 10010 & 0\\
\dynkin{0, 1, 0, 2} & \irrep[1]{9009} & 10010 & 0\\
\dynkin{4, 0, 1, 0} & \irrep{9360} & 10010 & 1\\
\dynkin{1, 3, 0, 0} & \irrep{9408} & 9800 & 1\\
\dynkin{5, 1, 0, 0} & \irrep{9504} & 12012 & 1\\
\dynkin{0, 1, 2, 0} & \irrep{10010} & 10010 & 0\\
\dynkin{0, 0, 1, 2} & \irrep{10296} & 12727 & 1\\
\dynkin{0, 4, 0, 0} & \irrep{11340} & 13860 & 0\\
\dynkin{9, 0, 0, 0} & \irrep{11440} & 24310 & 1\\
\dynkin{0, 0, 2, 1} & \irrep{12012} & 14014 & 0\\
\end{longtable}
\newpage
\begin{longtable}{lrrc}
\caption{\label{tab:Sp10Irreps}Sp(10) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{Sp(10) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0, 0, 0} & \irrep{10} & 1 & 1\\
\dynkin{0, 1, 0, 0, 0} & \irrep{44} & 8 & 0\\
\dynkin{2, 0, 0, 0, 0} & \irrep{55} & 12 & 0\\
\dynkin{0, 0, 1, 0, 0} & \irrep{110} & 27 & 1\\
\dynkin{0, 0, 0, 0, 1} & \irrep{132} & 42 & 1\\
\dynkin{0, 0, 0, 1, 0} & \irrep{165} & 48 & 0\\
\dynkin{3, 0, 0, 0, 0} & \irrep{220} & 78 & 1\\
\dynkin{1, 1, 0, 0, 0} & \irrep{320} & 96 & 1\\
\dynkin{4, 0, 0, 0, 0} & \irrep{715} & 364 & 0\\
\dynkin{0, 2, 0, 0, 0} & \irrep{780} & 312 & 0\\
\dynkin{1, 0, 1, 0, 0} & \irrep{891} & 324 & 0\\
\dynkin{1, 0, 0, 0, 1} & \irrep{1155} & 504 & 0\\
\dynkin{1, 0, 0, 1, 0} & \irrep{1408} & 576 & 1\\
\dynkin{2, 1, 0, 0, 0} & \irrep{1430} & 624 & 0\\
\dynkin{5, 0, 0, 0, 0} & \irrep{2002} & 1365 & 1\\
\dynkin{0, 1, 1, 0, 0} & \irrep{2860} & 1326 & 1\\
\dynkin{0, 0, 2, 0, 0} & \irrep{4004} & 2184 & 0\\
\dynkin{2, 0, 1, 0, 0} & \irrep{4212} & 2106 & 1\\
\dynkin{0, 1, 0, 0, 1} & \irrep{4290} & 2301 & 1\\
\dynkin{1, 2, 0, 0, 0} & \irrep{4620} & 2478 & 1\\
\dynkin{0, 0, 0, 0, 2} & \irrep{4719} & 3432 & 0\\
\dynkin{3, 1, 0, 0, 0} & \irrep{4928} & 2912 & 1\\
\dynkin{0, 1, 0, 1, 0} & \irrep{5005} & 2548 & 0\\
\dynkin{6, 0, 0, 0, 0} & \irrep[1]{5005} & 4368 & 0\\
\dynkin{2, 0, 0, 0, 1} & \irrep{5720} & 3276 & 1\\
\dynkin{2, 0, 0, 1, 0} & \irrep{6864} & 3744 & 0\\
\dynkin{0, 0, 0, 2, 0} & \irrep{7865} & 5148 & 0\\
\dynkin{0, 3, 0, 0, 0} & \irrep{8250} & 5400 & 0\\
\dynkin{0, 0, 1, 0, 1} & \irrep{8580} & 5304 & 0\\
\dynkin{0, 0, 1, 1, 0} & \irrep{9152} & 5408 & 1\\
\dynkin{0, 0, 0, 1, 1} & \irrep{9438} & 6435 & 1\\
\dynkin{7, 0, 0, 0, 0} & \irrep{11440} & 12376 & 1\\
\dynkin{4, 1, 0, 0, 0} & \irrep{14300} & 10920 & 0\\
\dynkin{3, 0, 1, 0, 0} & \irrep{15015} & 9828 & 0\\
\dynkin{2, 2, 0, 0, 0} & \irrep{17820} & 12312 & 0\\
\dynkin{1, 1, 1, 0, 0} & \irrep{17920} & 10752 & 0\\
\dynkin{3, 0, 0, 0, 1} & \irrep{21021} & 15288 & 0\\
\dynkin{8, 0, 0, 0, 0} & \irrep{24310} & 31824 & 0\\
\dynkin{3, 0, 0, 1, 0} & \irrep{24960} & 17472 & 1\\
\dynkin{1, 0, 2, 0, 0} & \irrep{28028} & 19110 & 1\\
\dynkin{1, 1, 0, 0, 1} & \irrep{28160} & 18944 & 0\\
\dynkin{1, 1, 0, 1, 0} & \irrep{32340} & 20874 & 1\\
\dynkin{0, 2, 1, 0, 0} & \irrep{35640} & 25596 & 1\\
\dynkin{5, 1, 0, 0, 0} & \irrep{36608} & 34944 & 1\\
\dynkin{1, 0, 0, 0, 2} & \irrep{37752} & 32604 & 1\\
\dynkin{1, 3, 0, 0, 0} & \irrep{42240} & 34176 & 1\\
\dynkin{4, 0, 1, 0, 0} & \irrep{44550} & 36855 & 1\\
\dynkin{9, 0, 0, 0, 0} & \irrep{48620} & 75582 & 1\\
\end{longtable}
\newpage
\begin{longtable}{lrrc}
\caption{\label{tab:Sp12Irreps}Sp(12) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{Sp(12) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0} & \irrep{12} & 1 & 1\\
\dynkin{0, 1, 0, 0, 0, 0} & \irrep{65} & 10 & 0\\
\dynkin{2, 0, 0, 0, 0, 0} & \irrep{78} & 14 & 0\\
\dynkin{0, 0, 1, 0, 0, 0} & \irrep{208} & 44 & 1\\
\dynkin{3, 0, 0, 0, 0, 0} & \irrep{364} & 105 & 1\\
\dynkin{0, 0, 0, 1, 0, 0} & \irrep{429} & 110 & 0\\
\dynkin{0, 0, 0, 0, 0, 1} & \irrep[1]{429} & 132 & 0\\
\dynkin{1, 1, 0, 0, 0, 0} & \irrep{560} & 140 & 1\\
\dynkin{0, 0, 0, 0, 1, 0} & \irrep{572} & 165 & 1\\
\dynkin{4, 0, 0, 0, 0, 0} & \irrep{1365} & 560 & 0\\
\dynkin{0, 2, 0, 0, 0, 0} & \irrep{1650} & 550 & 0\\
\dynkin{1, 0, 1, 0, 0, 0} & \irrep{2002} & 616 & 0\\
\dynkin{2, 1, 0, 0, 0, 0} & \irrep{2925} & 1050 & 0\\
\dynkin{1, 0, 0, 1, 0, 0} & \irrep{4368} & 1540 & 1\\
\dynkin{5, 0, 0, 0, 0, 0} & \irrep[1]{4368} & 2380 & 1\\
\dynkin{1, 0, 0, 0, 0, 1} & \irrep{4576} & 1848 & 1\\
\dynkin{1, 0, 0, 0, 1, 0} & \irrep{6006} & 2310 & 0\\
\dynkin{0, 1, 1, 0, 0, 0} & \irrep{7800} & 3050 & 1\\
\dynkin{2, 0, 1, 0, 0, 0} & \irrep{11088} & 4620 & 1\\
\dynkin{1, 2, 0, 0, 0, 0} & \irrep{11440} & 5060 & 1\\
\dynkin{3, 1, 0, 0, 0, 0} & \irrep{11648} & 5600 & 1\\
\dynkin{6, 0, 0, 0, 0, 0} & \irrep{12376} & 8568 & 0\\
\dynkin{0, 0, 2, 0, 0, 0} & \irrep{13650} & 6300 & 0\\
\dynkin{0, 1, 0, 1, 0, 0} & \irrep{18954} & 8262 & 0\\
\dynkin{0, 1, 0, 0, 0, 1} & \irrep{21450} & 10450 & 0\\
\dynkin{0, 3, 0, 0, 0, 0} & \irrep{24310} & 13090 & 0\\
\end{longtable}
\begin{longtable}{lrrc}
\caption{\label{tab:Sp14Irreps}Sp(14) Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{Sp(14) Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0, 0} & \irrep{14} & 1 & 1\\
\dynkin{0, 1, 0, 0, 0, 0, 0} & \irrep{90} & 12 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0} & \irrep{105} & 16 & 0\\
\dynkin{0, 0, 1, 0, 0, 0, 0} & \irrep{350} & 65 & 1\\
\dynkin{3, 0, 0, 0, 0, 0, 0} & \irrep{560} & 136 & 1\\
\dynkin{1, 1, 0, 0, 0, 0, 0} & \irrep{896} & 192 & 1\\
\dynkin{0, 0, 0, 1, 0, 0, 0} & \irrep{910} & 208 & 0\\
\dynkin{0, 0, 0, 0, 0, 0, 1} & \irrep{1430} & 429 & 1\\
\dynkin{0, 0, 0, 0, 1, 0, 0} & \irrep{1638} & 429 & 1\\
\dynkin{0, 0, 0, 0, 0, 1, 0} & \irrep{2002} & 572 & 0\\
\dynkin{4, 0, 0, 0, 0, 0, 0} & \irrep{2380} & 816 & 0\\
\dynkin{0, 2, 0, 0, 0, 0, 0} & \irrep{3094} & 884 & 0\\
\dynkin{1, 0, 1, 0, 0, 0, 0} & \irrep{3900} & 1040 & 0\\
\dynkin{2, 1, 0, 0, 0, 0, 0} & \irrep{5355} & 1632 & 0\\
\dynkin{5, 0, 0, 0, 0, 0, 0} & \irrep{8568} & 3876 & 1\\
\end{longtable}
\newpage
\subsubsection{Exceptional Algebras}
\enlargethispage{10pt}
\begin{longtable}{lrrcccc}
\caption{\label{tab:E6Irreps}\E6 Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/6 & & SO(10) & SU(6)${\times}$SU(2) & SU(3)${\times}$SU(3)${\times}$SU(3)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Triality & singlets & singlets & singlets\\
\midrule
\endfirsthead
\caption[]{\E6 Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/6 & & SO(10) & SU(6)${\times}$SU(2) & SU(3)${\times}$SU(3)${\times}$SU(3)\\
\rowcolor{tableheadcolor}label & (name) & (index) & Triality & singlets & singlets & singlets\\
\midrule
\endhead
\multicolumn{7}{l}{\footnotesize $^\ast$SO(10)${\times}$U(1) singlets resp.}
\endfoot
\bottomrule
\multicolumn{7}{l}{\footnotesize $^\ast$SO(10)${\times}$U(1) singlets resp.}
\endlastfoot
\dynkin{1, 0, 0, 0, 0, 0} & \irrep{27} & 1 & 1 & 1 & 0 & 0\\
\dynkin{0, 0, 0, 0, 0, 1} & \irrep{78} & 4 & 0 & \starred{1} & 0 & 0\\
\dynkin{0, 0, 0, 1, 0, 0} & \irrep{351} & 25 & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 2, 0} & \irrep[1]{351} & 28 & 1 & 1 & 0 & 0\\
\dynkin{1, 0, 0, 0, 1, 0} & \irrep{650} & 50 & 0 & \starred{1} & 1 & 2\\
\dynkin{1, 0, 0, 0, 0, 1} & \irrep{1728} & 160 & 1 & 1 & 0 & 0\\
\dynkin{0, 0, 0, 0, 0, 2} & \irrep{2430} & 270 & 0 & \starred{1} & 1 & 1\\
\dynkin{0, 0, 1, 0, 0, 0} & \irrep{2925} & 300 & 0 & 0 & 0 & 1\\
\dynkin{3, 0, 0, 0, 0, 0} & \irrep{3003} & 385 & 0 & 1 & 0 & 1\\
\dynkin{1, 1, 0, 0, 0, 0} & \irrep{5824} & 672 & 0 & 0 & 0 & 0\\
\dynkin{0, 1, 0, 0, 1, 0} & \irrep{7371} & 840 & 1 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 1, 0} & \irrep{7722} & 946 & 1 & 1 & 0 & 0\\
\dynkin{0, 0, 0, 1, 0, 1} & \irrep{17550} & 2300 & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 2, 1} & \irrep{19305} & 2695 & 1 & 1 & 0 & 0\\
\dynkin{4, 0, 0, 0, 0, 0} & \irrep[1]{19305} & 3520 & 1 & 1 & 0 & 0\\
\dynkin{0, 2, 0, 0, 0, 0} & \irrep{34398} & 5390 & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 0, 1, 1} & \irrep{34749} & 4752 & 0 & \starred{1} & 0 & 2\\
\dynkin{0, 0, 0, 0, 0, 3} & \irrep{43758} & 7854 & 0 & \starred{1} & 0 & 1\\
\dynkin{1, 0, 0, 0, 0, 2} & \irrep{46332} & 7260 & 1 & 1 & 0 & 0\\
\dynkin{1, 0, 1, 0, 0, 0} & \irrep{51975} & 7700 & 1 & 0 & 0 & 0\\
\dynkin{2, 1, 0, 0, 0, 0} & \irrep{54054} & 8932 & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 0, 3, 0} & \irrep{61425} & 10675 & 1 & 1 & 0 & 0\\
\dynkin{0, 1, 0, 1, 0, 0} & \irrep{70070} & 10780 & 0 & 0 & 1 & 3\\
\dynkin{2, 0, 0, 1, 0, 0} & \irrep{78975} & 12825 & 0 & 0 & 0 & 1\\
\dynkin{2, 0, 0, 0, 2, 0} & \irrep{85293} & 14580 & 0 & \starred{1} & 1 & 3\\
\dynkin{0, 0, 0, 0, 5, 0} & \irrep{100386} & 24310 & 1 & 1 & 0 & 0\\
\dynkin{0, 0, 1, 0, 0, 1} & \irrep{105600} & 17600 & 0 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 1, 1, 0} & \irrep{112320} & 18080 & 1 & 0 & 0 & 0\\
\dynkin{3, 0, 0, 0, 0, 1} & \irrep{146432} & 28160 & 0 & 1 & 0 & 0\\
\dynkin{1, 1, 0, 0, 0, 1} & \irrep{252252} & 45276 & 0 & 0 & 0 & 2\\
\dynkin{0, 1, 0, 0, 1, 1} & \irrep{314496} & 56000 & 1 & 0 & 0 & 0\\
\dynkin{2, 0, 0, 0, 1, 1} & \irrep{359424} & 67072 & 1 & 1 & 0 & 0\\
\dynkin{0, 0, 0, 1, 3, 0} & \irrep[1]{359424} & 79360 & 1 & 0 & 0 & 0\\
\dynkin{4, 0, 0, 0, 1, 0} & \irrep{371800} & 85800 & 0 & 1 & 0 & 2\\
\dynkin{0, 0, 1, 1, 0, 0} & \irrep{386100} & 73700 & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 1, 0, 2} & \irrep{393822} & 78540 & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 2, 1, 0} & \irrep{412776} & 85848 & 1 & 0 & 0 & 0\\
\dynkin{6, 0, 0, 0, 0, 0} & \irrep{442442} & 136136 & 0 & 1 & 0 & 1\\
\dynkin{0, 0, 0, 0, 2, 2} & \irrep{459459} & 95557 & 1 & 1 & 0 & 0\\
\dynkin{0, 0, 1, 0, 2, 0} & \irrep{494208} & 98560 & 1 & 0 & 0 & 0\\
\dynkin{0, 0, 0, 0, 0, 4} & \irrep{537966} & 137940 & 0 & \starred{1} & 1 & 1\\
\dynkin{3, 0, 0, 1, 0, 0} & \irrep{579150} & 125400 & 1 & 0 & 0 & 0\\
\dynkin{1, 0, 0, 2, 0, 0} & \irrep{600600} & 123200 & 0 & 0 & 0 & 1\\
\dynkin{3, 0, 0, 0, 2, 0} & \irrep{638820} & 143780 & 1 & 1 & 0 & 0\\
\dynkin{1, 0, 0, 0, 0, 3} & \irrep{741312} & 170016 & 1 & 1 & 0 & 0\\
\dynkin{1, 0, 0, 0, 1, 2} & \irrep{812175} & 166600 & 0 & \starred{1} & 1 & 3\\
\dynkin{1, 0, 1, 0, 1, 0} & \irrep{852930} & 167670 & 0 & 0 & 0 & 3\\
\end{longtable}
\newpage
\begin{longtable}{lrrc}
\caption{\label{tab:E7Irreps}\E7 Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/12 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{\E7 Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/12 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{0, 0, 0, 0, 0, 1, 0} & \irrep{56} & 1 & 1\\
\dynkin{1, 0, 0, 0, 0, 0, 0} & \irrep{133} & 3 & 0\\
\dynkin{0, 0, 0, 0, 0, 0, 1} & \irrep{912} & 30 & 1\\
\dynkin{0, 0, 0, 0, 0, 2, 0} & \irrep{1463} & 55 & 0\\
\dynkin{0, 0, 0, 0, 1, 0, 0} & \irrep{1539} & 54 & 0\\
\dynkin{1, 0, 0, 0, 0, 1, 0} & \irrep{6480} & 270 & 1\\
\dynkin{2, 0, 0, 0, 0, 0, 0} & \irrep{7371} & 351 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0} & \irrep{8645} & 390 & 0\\
\dynkin{0, 0, 0, 0, 0, 3, 0} & \irrep{24320} & 1440 & 1\\
\dynkin{0, 0, 0, 1, 0, 0, 0} & \irrep{27664} & 1430 & 1\\
\dynkin{0, 0, 0, 0, 0, 1, 1} & \irrep{40755} & 2145 & 0\\
\dynkin{0, 0, 0, 0, 1, 1, 0} & \irrep{51072} & 2832 & 1\\
\dynkin{1, 0, 0, 0, 0, 0, 1} & \irrep{86184} & 4995 & 1\\
\dynkin{1, 0, 0, 0, 0, 2, 0} & \irrep{150822} & 9450 & 0\\
\dynkin{1, 0, 0, 0, 1, 0, 0} & \irrep{152152} & 9152 & 0\\
\dynkin{3, 0, 0, 0, 0, 0, 0} & \irrep{238602} & 17940 & 0\\
\dynkin{0, 0, 0, 0, 0, 0, 2} & \irrep{253935} & 17820 & 0\\
\dynkin{0, 0, 0, 0, 0, 4, 0} & \irrep{293930} & 24310 & 0\\
\dynkin{2, 0, 0, 0, 0, 1, 0} & \irrep{320112} & 21762 & 1\\
\dynkin{0, 1, 0, 0, 0, 1, 0} & \irrep{362880} & 23760 & 1\\
\dynkin{0, 0, 1, 0, 0, 0, 0} & \irrep{365750} & 24750 & 0\\
\dynkin{1, 1, 0, 0, 0, 0, 0} & \irrep{573440} & 40960 & 0\\
\dynkin{0, 0, 0, 0, 2, 0, 0} & \irrep{617253} & 46410 & 0\\
\dynkin{0, 0, 0, 0, 1, 0, 1} & \irrep{861840} & 61830 & 1\\
\dynkin{0, 0, 0, 0, 0, 2, 1} & \irrep{885248} & 65728 & 1\\
\dynkin{0, 0, 0, 0, 1, 2, 0} & \irrep{915705} & 71145 & 0\\
\dynkin{0, 0, 0, 1, 0, 1, 0} & \irrep{980343} & 71253 & 0\\
\dynkin{1, 0, 0, 0, 0, 3, 0} & \irrep{2273920} & 194480 & 1\\
\dynkin{1, 0, 0, 1, 0, 0, 0} & \irrep{2282280} & 178035 & 1\\
\end{longtable}
\vspace{-10pt}
\begin{longtable}{lrrc}
\caption{\label{tab:E8Irreps}\E8 Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/60 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{\E8 Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/60 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{0, 0, 0, 0, 0, 0, 1, 0} & \irrep{248} & 1 & 0\\
\dynkin{1, 0, 0, 0, 0, 0, 0, 0} & \irrep{3875} & 25 & 0\\
\dynkin{0, 0, 0, 0, 0, 0, 2, 0} & \irrep{27000} & 225 & 0\\
\dynkin{0, 0, 0, 0, 0, 1, 0, 0} & \irrep{30380} & 245 & 0\\
\dynkin{0, 0, 0, 0, 0, 0, 0, 1} & \irrep{147250} & 1425 & 0\\
\dynkin{1, 0, 0, 0, 0, 0, 1, 0} & \irrep{779247} & 8379 & 0\\
\dynkin{0, 0, 0, 0, 0, 0, 3, 0} & \irrep{1763125} & 22750 & 0\\
\dynkin{0, 0, 0, 0, 1, 0, 0, 0} & \irrep{2450240} & 29640 & 0\\
\dynkin{0, 0, 0, 0, 0, 1, 1, 0} & \irrep{4096000} & 51200 & 0\\
\dynkin{2, 0, 0, 0, 0, 0, 0, 0} & \irrep{4881384} & 65610 & 0\\
\dynkin{0, 1, 0, 0, 0, 0, 0, 0} & \irrep{6696000} & 88200 & 0\\
\dynkin{0, 0, 0, 0, 0, 0, 1, 1} & \irrep{26411008} & 372736 & 0\\
\dynkin{1, 0, 0, 0, 0, 0, 2, 0} & \irrep{70680000} & 1083000 & 0\\
\dynkin{1, 0, 0, 0, 0, 1, 0, 0} & \irrep{76271625} & 1148175 & 0\\
\end{longtable}
\newpage
\begin{longtable}{lrrc}
\caption{\label{tab:F4Irreps}\F4 Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/6 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{\F4 Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/6 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{0, 0, 0, 1} & \irrep{26} & 1 & 0\\
\dynkin{1, 0, 0, 0} & \irrep{52} & 3 & 0\\
\dynkin{0, 0, 1, 0} & \irrep{273} & 21 & 0\\
\dynkin{0, 0, 0, 2} & \irrep{324} & 27 & 0\\
\dynkin{1, 0, 0, 1} & \irrep{1053} & 108 & 0\\
\dynkin{2, 0, 0, 0} & \irrep[1]{1053} & 135 & 0\\
\dynkin{0, 1, 0, 0} & \irrep{1274} & 147 & 0\\
\dynkin{0, 0, 0, 3} & \irrep{2652} & 357 & 0\\
\dynkin{0, 0, 1, 1} & \irrep{4096} & 512 & 0\\
\dynkin{1, 0, 1, 0} & \irrep{8424} & 1242 & 0\\
\dynkin{1, 0, 0, 2} & \irrep{10829} & 1666 & 0\\
\dynkin{3, 0, 0, 0} & \irrep{12376} & 2618 & 0\\
\dynkin{0, 0, 0, 4} & \irrep{16302} & 3135 & 0\\
\dynkin{2, 0, 0, 1} & \irrep{17901} & 3213 & 0\\
\dynkin{0, 1, 0, 1} & \irrep{19278} & 3213 & 0\\
\dynkin{0, 0, 2, 0} & \irrep{19448} & 3366 & 0\\
\dynkin{1, 1, 0, 0} & \irrep{29172} & 5610 & 0\\
\dynkin{0, 0, 1, 2} & \irrep{34749} & 6237 & 0\\
\dynkin{1, 0, 0, 3} & \irrep{76076} & 16093 & 0\\
\dynkin{0, 0, 0, 5} & \irrep{81081} & 20790 & 0\\
\dynkin{4, 0, 0, 0} & \irrep{100776} & 31008 & 0\\
\dynkin{1, 0, 1, 1} & \irrep{106496} & 21504 & 0\\
\dynkin{0, 1, 1, 0} & \irrep{107406} & 23409 & 0\\
\dynkin{2, 0, 1, 0} & \irrep{119119} & 27489 & 0\\
\dynkin{0, 1, 0, 2} & \irrep{160056} & 35910 & 0\\
\dynkin{2, 0, 0, 2} & \irrep[1]{160056} & 37962 & 0\\
\dynkin{3, 0, 0, 1} & \irrep{184756} & 49742 & 0\\
\dynkin{0, 0, 2, 1} & \irrep{205751} & 47481 & 0\\
\dynkin{0, 0, 1, 3} & \irrep{212992} & 51200 & 0\\
\dynkin{0, 2, 0, 0} & \irrep{226746} & 61047 & 0\\
\dynkin{2, 1, 0, 0} & \irrep{340119} & 95931 & 0\\
\dynkin{0, 0, 0, 6} & \irrep{342056} & 111826 & 0\\
\dynkin{1, 1, 0, 1} & \irrep{379848} & 94962 & 0\\
\dynkin{1, 0, 0, 4} & \irrep{412776} & 113778 & 0\\
\dynkin{1, 0, 2, 0} & \irrep{420147} & 107730 & 0\\
\dynkin{5, 0, 0, 0} & \irrep{627912} & 261630 & 0\\
\dynkin{0, 0, 3, 0} & \irrep{629356} & 181545 & 0\\
\dynkin{1, 0, 1, 2} & \irrep{787644} & 207009 & 0\\
\dynkin{0, 1, 0, 3} & \irrep{952952} & 274890 & 0\\
\dynkin{2, 0, 0, 3} & \irrep{1002456} & 302022 & 0\\
\dynkin{0, 0, 1, 4} & \irrep{1042899} & 320892 & 0\\
\dynkin{3, 0, 1, 0} & \irrep{1074944} & 351424 & 0\\
\dynkin{0, 1, 1, 1} & \irrep{1118208} & 311808 & 0\\
\dynkin{0, 0, 0, 7} & \irrep{1264120} & 510510 & 0\\
\dynkin{2, 0, 1, 1} & \irrep{1327104} & 387072 & 0\\
\dynkin{0, 0, 2, 2} & \irrep{1341522} & 395577 & 0\\
\dynkin{4, 0, 0, 1} & \irrep{1360476} & 505818 & 0\\
\dynkin{3, 0, 0, 2} & \irrep{1484406} & 494802 & 0\\
\dynkin{1, 1, 1, 0} & \irrep{1801371} & 554268 & 0\\
\dynkin{1, 0, 0, 5} & \irrep{1850212} & 640458 & 0\\
\dynkin{0, 2, 0, 1} & \irrep{2488563} & 829521 & 0\\
\dynkin{3, 1, 0, 0} & \irrep{2674763} & 1028755 & 0\\
\dynkin{1, 1, 0, 2} & \irrep{2792556} & 877149 & 0\\
\dynkin{6, 0, 0, 0} & \irrep{3187041} & 1716099 & 0\\
\dynkin{1, 2, 0, 0} & \irrep{3195192} & 1167474 & 0\\
\dynkin{0, 1, 2, 0} & \irrep{3508596} & 1192023 & 0\\
\dynkin{1, 0, 2, 1} & \irrep{3921372} & 1256850 & 0\\
\dynkin{2, 1, 0, 1} & \irrep{3955952} & 1369368 & 0\\
\dynkin{0, 0, 0, 8} & \irrep{4188834} & 2040714 & 0\\
\dynkin{1, 0, 1, 3} & \irrep{4313088} & 1423872 & 0\\
\dynkin{0, 0, 1, 5} & \irrep[1]{4313088} & 1645056 & 0\\
\dynkin{0, 1, 0, 4} & \irrep{4528953} & 1625778 & 0\\
\dynkin{2, 0, 2, 0} & \irrep{4582656} & 1615680 & 0\\
\dynkin{2, 0, 0, 4} & \irrep{4940676} & 1836918 & 0\\
\dynkin{0, 0, 3, 1} & \irrep{5218304} & 1856512 & 0\\
\dynkin{0, 0, 2, 3} & \irrep{6680856} & 2441082 & 0\\
\dynkin{0, 1, 1, 2} & \irrep{7113106} & 2462229 & 0\\
\dynkin{4, 0, 1, 0} & \irrep{7142499} & 3113397 & 0\\
\dynkin{1, 0, 0, 6} & \irrep{7147140} & 3023790 & 0\\
\dynkin{5, 0, 0, 1} & \irrep{7822737} & 3811077 & 0\\
\dynkin{3, 0, 0, 3} & \irrep{8498776} & 3432198 & 0\\
\dynkin{2, 0, 1, 2} & \irrep{8843094} & 3174444 & 0\\
\dynkin{0, 2, 1, 0} & \irrep{9683388} & 3848526 & 0\\
\dynkin{4, 0, 0, 2} & \irrep{10044008} & 4442542 & 0\\
\dynkin{1, 0, 3, 0} & \irrep{10482472} & 4031720 & 0\\
\dynkin{3, 0, 1, 1} & \irrep{10862592} & 4282368 & 0\\
\dynkin{0, 0, 4, 0} & \irrep{11955216} & 5057976 & 0\\
\dynkin{0, 0, 0, 9} & \irrep{12664184} & 7306260 & 0\\
\dynkin{0, 3, 0, 0} & \irrep{13530946} & 6245052 & 0\\
\dynkin{7, 0, 0, 0} & \irrep{13748020} & 9253475 & 0\\
\dynkin{1, 1, 0, 3} & \irrep{15031926} & 5781510 & 0\\
\dynkin{0, 0, 1, 6} & \irrep{15611882} & 7205484 & 0\\
\dynkin{0, 2, 0, 2} & \irrep{15997696} & 6460608 & 0\\
\dynkin{4, 1, 0, 0} & \irrep{16016924} & 8008462 & 0\\
\dynkin{2, 1, 1, 0} & \irrep{16665831} & 6837264 & 0\\
\dynkin{1, 1, 1, 1} & \irrep{16777216} & 6291456 & 0\\
\dynkin{0, 1, 0, 5} & \irrep{18206370} & 7936110 & 0\\
\dynkin{1, 0, 1, 4} & \irrep{19214624} & 7759752 & 0\\
\dynkin{2, 0, 0, 5} & \irrep{20407140} & 9157050 & 0\\
\dynkin{1, 0, 2, 2} & \irrep{23056488} & 9015678 & 0\\
\dynkin{1, 0, 0, 7} & \irrep{24488568} & 12401262 & 0\\
\dynkin{2, 2, 0, 0} & \irrep{26108082} & 12384603 & 0\\
\dynkin{2, 1, 0, 2} & \irrep{26476956} & 11032065 & 0\\
\dynkin{0, 0, 2, 4} & \irrep{27625000} & 12218750 & 0\\
\dynkin{0, 0, 3, 2} & \irrep{28068768} & 12055176 & 0\\
\dynkin{0, 1, 2, 1} & \irrep{28481544} & 11684736 & 0\\
\dynkin{3, 1, 0, 1} & \irrep[1]{28481544} & 12962754 & 0\\
\dynkin{1, 2, 0, 1} & \irrep{31702671} & 13819113 & 0\\
\end{longtable}
\newpage
\begin{longtable}{lrrc}
\caption{\label{tab:G2Irreps}\G2 Irreps}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endfirsthead
\caption[]{\G2 Irreps (continued)}\\
\toprule
\rowcolor{tableheadcolor}
Dynkin & Dimension & l/2 & Congruency\\
\rowcolor{tableheadcolor}label & (name) & (index) & class\\
\midrule
\endhead
\endfoot
\bottomrule
\endlastfoot
\dynkin{1, 0} & \irrep{7} & 1 & 0\\
\dynkin{0, 1} & \irrep{14} & 4 & 0\\
\dynkin{2, 0} & \irrep{27} & 9 & 0\\
\dynkin{1, 1} & \irrep{64} & 32 & 0\\
\dynkin{3, 0} & \irrep{77} & 44 & 0\\
\dynkin{0, 2} & \irrep[1]{77} & 55 & 0\\
\dynkin{4, 0} & \irrep{182} & 156 & 0\\
\dynkin{2, 1} & \irrep{189} & 144 & 0\\
\dynkin{0, 3} & \irrep{273} & 351 & 0\\
\dynkin{1, 2} & \irrep{286} & 286 & 0\\
\dynkin{5, 0} & \irrep{378} & 450 & 0\\
\dynkin{3, 1} & \irrep{448} & 480 & 0\\
\dynkin{6, 0} & \irrep{714} & 1122 & 0\\
\dynkin{2, 2} & \irrep{729} & 972 & 0\\
\dynkin{0, 4} & \irrep{748} & 1496 & 0\\
\dynkin{1, 3} & \irrep{896} & 1472 & 0\\
\dynkin{4, 1} & \irrep{924} & 1320 & 0\\
\dynkin{7, 0} & \irrep{1254} & 2508 & 0\\
\dynkin{3, 2} & \irrep{1547} & 2652 & 0\\
\dynkin{5, 1} & \irrep{1728} & 3168 & 0\\
\dynkin{0, 5} & \irrep{1729} & 4940 & 0\\
\dynkin{2, 3} & \irrep{2079} & 4257 & 0\\
\dynkin{8, 0} & \irrep[1]{2079} & 5148 & 0\\
\dynkin{1, 4} & \irrep{2261} & 5491 & 0\\
\dynkin{4, 2} & \irrep{2926} & 6270 & 0\\
\dynkin{6, 1} & \irrep{3003} & 6864 & 0\\
\dynkin{9, 0} & \irrep{3289} & 9867 & 0\\
\dynkin{0, 6} & \irrep{3542} & 13662 & 0\\
\dynkin{3, 3} & \irrep{4096} & 10240 & 0\\
\dynkin{2, 4} & \irrep{4914} & 14274 & 0\\
\dynkin{7, 1} & \irrep{4928} & 13728 & 0\\
\dynkin{1, 5} & \irrep[1]{4928} & 16544 & 0\\
\dynkincomma{10, 0} & \irrep{5005} & 17875 & 0\\
\dynkin{5, 2} & \irrep{5103} & 13365 & 0\\
\dynkin{0, 7} & \irrep{6630} & 33150 & 0\\
\dynkin{4, 3} & \irrep{7293} & 21879 & 0\\
\dynkincomma{11, 0} & \irrep{7371} & 30888 & 0\\
\dynkin{8, 1} & \irrep{7722} & 25740 & 0\\
\dynkin{6, 2} & \irrep{8372} & 26312 & 0\\
\dynkin{3, 4} & \irrep{9177} & 31464 & 0\\
\dynkin{1, 6} & \irrep{9660} & 42780 & 0\\
\dynkin{2, 5} & \irrep{10206} & 39852 & 0\\
\dynkincomma{12, 0} & \irrep{10556} & 51272 & 0\\
\dynkin{0, 8} & \irrep{11571} & 72732 & 0\\
\dynkin{9, 1} & \irrep{11648} & 45760 & 0\\
\dynkin{5, 3} & \irrep{12096} & 42912 & 0\\
\dynkin{7, 2} & \irrep{13090} & 48620 & 0\\
\dynkincomma{13, 0} & \irrep{14756} & 82212 & 0\\
\dynkin{4, 4} & \irrep{15625} & 62500 & 0\\
\dynkincomma{10, 1} & \irrep{17017} & 77792 & 0\\
\dynkin{1, 7} & \irrep{17472} & 98592 & 0\\
\dynkin{3, 5} & \irrep{18304} & 82368 & 0\\
\dynkin{6, 3} & \irrep{19019} & 78793 & 0\\
\dynkin{0, 9} & \irrep{19096} & 147312 & 0\\
\dynkin{2, 6} & \irrep{19278} & 97308 & 0\\
\dynkin{8, 2} & \irrep{19683} & 85293 & 0\\
\dynkincomma{14, 0} & \irrep{20196} & 127908 & 0\\
\dynkincomma{11, 1} & \irrep{24192} & 127296 & 0\\
\dynkin{5, 4} & \irrep{24948} & 115236 & 0\\
\dynkincomma{15, 0} & \irrep{27132} & 193800 & 0\\
\dynkin{9, 2} & \irrep{28652} & 143260 & 0\\
\dynkin{7, 3} & \irrep{28672} & 137216 & 0\\
\dynkin{1, 8} & \irrep{29667} & 207669 & 0\\
\dynkin{4, 5} & \irrep{30107} & 154836 & 0\\
\dynkincomma{0, 10} & \irrep[1]{30107} & 279565 & 0\\
\dynkin{3, 6} & \irrep{33495} & 191400 & 0\\
\dynkincomma{12, 1} & \irrep{33592} & 201552 & 0\\
\dynkin{2, 7} & \irrep{33858} & 214434 & 0\\
\dynkincomma{16, 0} & \irrep{35853} & 286824 & 0\\
\dynkin{6, 4} & \irrep{37961} & 200651 & 0\\
\dynkincomma{10, 2} & \irrep{40579} & 231880 & 0\\
\dynkin{8, 3} & \irrep{41769} & 228735 & 0\\
\dynkincomma{0, 11} & \irrep{45695} & 502645 & 0\\
\dynkincomma{13, 1} & \irrep{45696} & 310080 & 0\\
\dynkin{5, 5} & \irrep{46656} & 272160 & 0\\
\dynkincomma{17, 0} & \irrep{46683} & 415701 & 0\\
\dynkin{1, 9} & \irrep{47872} & 406912 & 0\\
\dynkin{4, 6} & \irrep{53599} & 344565 & 0\\
\dynkin{7, 4} & \irrep{55614} & 333684 & 0\\
\dynkincomma{11, 2} & \irrep{56133} & 363528 & 0\\
\dynkin{2, 8} & \irrep[1]{56133} & 435699 & 0\\
\dynkin{3, 7} & \irrep{57344} & 405504 & 0\\
\dynkin{9, 3} & \irrep{59136} & 367488 & 0\\
\dynkincomma{18, 0} & \irrep{59983} & 591261 & 0\\
\dynkincomma{14, 1} & \irrep{61047} & 465120 & 0\\
\dynkincomma{0, 12} & \irrep{67158} & 863460 & 0\\
\dynkin{6, 5} & \irrep{69160} & 454480 & 0\\
\dynkincomma{1, 10} & \irrep{74074} & 751322 & 0\\
\dynkincomma{12, 2} & \irrep{76076} & 554268 & 0\\
\dynkincomma{19, 0} & \irrep{76153} & 826804 & 0\\
\dynkin{8, 4} & \irrep{79002} & 534204 & 0\\
\dynkincomma{15, 1} & \irrep{80256} & 682176 & 0\\
\dynkin{5, 6} & \irrep{81081} & 583011 & 0\\
\dynkincomma{10, 3} & \irrep{81719} & 572033 & 0\\
\dynkin{2, 9} & \irrep{88803} & 828828 & 0\\
\dynkin{4, 7} & \irrep{89726} & 704990 & 0\\
\dynkin{3, 8} & \irrep{93093} & 797940 & 0\\
\dynkincomma{20, 0} & \irrep{95634} & 1138500 & 0\\
\end{longtable}
\newpage
\section{\LaTeX\ Package}
\label{LaTeXPackage}
LieART comes with a \LaTeX\ package (\texttt{lieart.sty} in the subdirectory \texttt{latex/}) that defines commands to display irreps, roots and weights properly (see Table~\ref{tab:LaTeXCommands}),
which are displayed by LieART using the \texttt{LaTeXForm} on an appropriate expression, e.g.:
\begin{mathin}
DecomposeProduct[Irrep[SU3][8],Irrep[SU3][8]]//LaTeXForm
\end{mathin}
\begin{mathout}
\verb#$\irrep{1}+2(\irrep{8})+\irrep{10}+\irrepbar{10}+\irrep{27}$#
\end{mathout}
% \textbackslash is already defined by the LaTeX kernel; \providecommand avoids
% the "Command \textbackslash already defined" error that \newcommand would raise.
\providecommand{\textbackslash}{\textbackslash}
\begin{table}[!h]
\begin{center}
\begin{tabularx}{\textwidth}{llX}
\toprule
\textbf{Command Example} & \textbf{Output} & \textbf{Description}\\
\midrule
\texttt{\textbackslash irrep\{10\}} & \irrep{10} & dimensional name of irrep\\
\texttt{\textbackslash irrepbar\{10\}} & \irrepbar{10} & dimensional name of conjugated irrep\\
\texttt{\textbackslash irrep[2]\{175\}} & \irrep[2]{175} & number of primes as optional parameter\\
\texttt{\textbackslash irrepsub\{8\}\{s\}} & \irrepsub{8}{s} & irrep with subscript, e.g., irreps of \SO8 \\
\texttt{\textbackslash irrepbarsub\{10\}\{a\}} & \irrepbarsub{10}{a} & conjugated irrep with subscript, e.g., for labeling antisymmetric product\\
\texttt{\textbackslash dynkin\{0,1,0,0\}} & \dynkin{0,1,0,0} & Dynkin label of irrep\\
\texttt{\textbackslash dynkincomma\{0,10,0,0\}} & \dynkincomma{0,10,0,0} & for Dynkin labels with at least one digit larger than 9\newline
(also available as \texttt{\textbackslash root}, \texttt{\textbackslash rootorthogonal},\newline
\texttt{\textbackslash weightalpha} and \texttt{\textbackslash weightorthogonal}\newline for negative integers) \\
\texttt{\textbackslash weight\{0,1,0,{-}1\}} & \weight{0,1,0,{-1}} & weight in $\omega$-basis\\
\texttt{\textbackslash rootomega\{{-}1,2,{-}1,0\}} & \rootomega{{-}1,2,{-}1,0} & root in $\omega$-basis\\
\bottomrule
\end{tabularx}
\caption{\label{tab:LaTeXCommands}\LaTeX\ commands defined in supplemental style file \texttt{lieart.sty}}
\end{center}
\end{table}
\vspace{-20pt}
\section*{References}}
\journal{Computer Physics Communications}
\begin{document}
\input{frontmatter}
\tableofcontents
\newpage
\input{Introduction}
\newpage
\input{Installation}
\newpage
\input{QuickStartTutorial}
\pagebreak
\input{TheoryAndImplementation}
\input{LaTeXPackage}
\input{ConclusionsAndOutlook}
\input{Acknowledgments}
\bibliographystyle{elsarticle-num}
\section{Quick Start}
\label{sec:QuickStart}
\newcommand{\mmastring}[1]{\textcolor{gray}{#1}}
This section provides a tutorial introducing the most important and frequently used functions of LieART for Lie algebra and representation theory related calculations.
The functions are introduced based on simple examples that can easily be modified and extended to the desired application of the user.
Most examples use irreducible representations (irreps) of \SU5, which is less trivial than the \SU3 used in most textbook examples, but small enough to return results
almost instantly on any recent computer. Also, \SU5 frequently appears in unified model building since the Standard-Model gauge group is one of its maximal subgroups.
This tutorial can also be found in the LieART documentation integrated into the Mathematica Documentation Center as ``Quick Start Tutorial'' under the section ``Tutorials''
on the LieART documentation home.
This loads the package:
\begin{mathin}
<<\:LieART`
\end{mathin}\par
\stepcounter{outcount}
\subsection{Entering Irreducible Representations}
Irreps are internally described by their Dynkin
label with a combined head of \com{Irrep} and the Lie algebra.
\definition{
\com{Irrep[\args{algebraClass}][\args{label}]} & Irrep described by its \args{algebraClass} and Dynkin \args{label}.
}{Entering irreps by Dynkin label.}
The \args{algebraClass} follows the Dynkin classification of simple Lie algebras
and can only be \com{A}, \com{B}, \com{C}, \com{D} for the classical algebras
and \com{E6}, \com{E7}, \com{E8}, \com{F4} and \com{G2} for the exceptional
algebras. The precise classical algebra is determined by the length of the
Dynkin label.
Entering the \irrepbar{10} of \SU5 by its Dynkin label and algebra class:
\begin{mathin}
Irrep[A][0,0,1,0]//FullForm
\end{mathin}
\begin{mathout}
Irrep[A][0,0,1,0]
\end{mathout}
In \com{StandardForm} the irrep is displayed in the textbook notation of Dynkin labels:
\begin{mathin}
Irrep[A][0,0,1,0]//StandardForm
\end{mathin}
\begin{mathout}
\dynkin{0,0,1,0}
\end{mathout}
In \com{TraditionalForm} (default) the irrep is displayed by its dimensional name:
\begin{mathin}
Irrep[A][0,0,1,0]
\end{mathin}
\begin{mathout}
\irrepbar{10}
\end{mathout}
The default output format type of LieART is \com{TraditionalForm}. The
associated user setting is overwritten for the notebook LieART is loaded in. For
\com{StandardForm} as output format type please set the global variable
\com{\$DefaultOutputForm=StandardForm}.
As an example for entering an irrep of an exceptional algebra, consider the \irrep{27} of \E6:
\begin{mathin}
Irrep[E6][1,0,0,0,0,0]
\end{mathin}
\begin{mathout}
\irrep{27}
\end{mathout}
Irreps may also be entered by their dimensional name (dimname). The package
transforms the irrep into its Dynkin label. Since the algebra of an irrep of a classical Lie algebra
becomes ambiguous with only the dimensional name, it has to be specified.
\definition{
\com{Irrep[\args{algebra}][\args{dimname}]} & Irrep entered by its \args{algebra} and \args{dimname}.
}{Entering irreps by name.}
\pagebreak
Entering the \irrepbar{10} of \SU5 by its dimensional name specifying the algebra by its Dynkin classification \A4:
\begin{mathin}
Irrep[A4][Bar[10]]//InputForm
\end{mathin}
\begin{mathout}
Irrep[A][0,0,1,0]
\end{mathout}
The traditional name of the algebra \SU{5} may also be used:
\begin{mathin}
Irrep[SU5][Bar[10]]//InputForm
\end{mathin}
\begin{mathout}
Irrep[A][0,0,1,0]
\end{mathout}
Irreps of product algebras like $\SU3{\otimes}\SU2{\otimes}\U1$ are specified by
\com{ProductIrrep} with the individual irreps of simple Lie algebras as arguments.
\definition{
\com{ProductIrrep[\args{irreps}]} & Head of product \args{irreps}, gathering irreps of simple Lie algebras.
}{Product irreps.}
The product irrep $(\irrep{3}, \irrepbar{3})$ of $\SU3{\otimes}\SU3$:
\begin{mathin}
ProductIrrep[Irrep[SU3][3],Irrep[SU3][Bar[3]]]
\end{mathin}
\begin{mathout}
(\irrep{3},\irrepbar{3})
\end{mathout}
\begin{mathin}
\%//InputForm
\end{mathin}
\begin{mathout}
ProductIrrep[Irrep[A][1,0],Irrep[A][0,1]]
\end{mathout}
\begin{mathin}
ProductIrrep[Irrep[A][1,0],Irrep[A][0,1]]
\end{mathin}
\begin{mathout}
(\irrep{3},\irrepbar{3})
\end{mathout}
Take for example the left-handed quark doublet in the Standard-Model gauge group
$\SU3{\otimes}\SU2{\otimes}\U1$ (The \U1 charge is not typeset in bold face):
\begin{mathin}
ProductIrrep[Irrep[SU3][3],Irrep[SU2][2],Irrep[U1][1/3]]
\end{mathin}
\begin{mathout}
(\irrep{3},\irrep{2})\!($\mathtt{1/3}$)
\end{mathout}
\begin{mathin}
\%//InputForm
\end{mathin}
\begin{mathout}
ProductIrrep[Irrep[A][1,0],Irrep[A][1],Irrep[U][1/3]]
\end{mathout}
\subsection{Decomposing Tensor Products}
\definition{
\com{DecomposeProduct[\args{irreps}]} & Decomposes the tensor product of several \args{irreps}.
}{Tensor product decomposition.}
Decompose the tensor product $\irrep{3}{\otimes}\irrepbar{3}$ of \SU3:
\begin{mathin}
DecomposeProduct[Irrep[SU3][3],Irrep[SU3][Bar[3]]]
\end{mathin}
\begin{mathout}
$\irrep{1}+\irrep{8}$
\end{mathout}
Decompose the tensor product $\irrep{27}{\otimes}\irrepbar{27}$ of \E6:
\begin{mathin}
DecomposeProduct[Irrep[E6][27],Irrep[E6][Bar[27]]]
\end{mathin}
\begin{mathout}
$\irrep{1}+\irrep{78}+\irrep{650}$
\end{mathout}
Decompose the tensor product $\irrep{3}{\otimes}\irrep{3}{\otimes}\irrep{3}$ of \SU3:
\begin{mathin}
DecomposeProduct[Irrep[SU3][3],Irrep[SU3][3],Irrep[SU3][3]]
\end{mathin}
\begin{mathout}
$\irrep{1}+2(\irrep{8})+\irrep{10}$
\end{mathout}
Decompose the tensor product $\irrep{8}{\otimes}\irrep{8}$ of \SU3:
\begin{mathin}
DecomposeProduct[Irrep[SU3][8],Irrep[SU3][8]]
\end{mathin}
\begin{mathout}
$\irrep{1}+2(\irrep{8})+\irrep{10}+\irrepbar{10}+\irrep{27}$
\end{mathout}
Internally a sum of irreps is represented by \com{IrrepPlus} and \com{IrrepTimes}, an analog of the built-in functions \com{Plus} and \com{Times}:
\begin{mathin}
\%//InputForm
\end{mathin}
\par
\medskip
\begin{mathout}
IrrepPlus[Irrep[A][0,0],\:IrrepTimes[2,\:Irrep[A][1,1]],\linebreak Irrep[A][3,\,0],\:Irrep[A][0,3],\:Irrep[A][2,2]]
\end{mathout}
Results can be transformed into a list of irreps with \com{IrrepList}, suitable for further processing with functions like \com{Select} or \com{Cases}:
\begin{mathin}
\%//IrrepList
\end{mathin}
\begin{mathout}
\{\irrep{1},\irrep{8},\irrep{8},\irrep{10},\irrepbar{10},\irrep{27}\}
\end{mathout}
Decompose tensor products of product irreps:
\begin{mathin}
DecomposeProduct[\linebreak ProductIrrep[Irrep[SU3][3],Irrep[SU3][Bar[3]],Irrep[SU3][1]],\linebreak ProductIrrep[Irrep[SU3][Bar[3]],Irrep[SU3][3],Irrep[SU3][1]]]
\end{mathin}
\begin{mathout}
$(\irrep{1},\irrep{1},\irrep{1})+(\irrep{8},\irrep{1},\irrep{1})+(\irrep{1},\irrep{8},\irrep{1})+(\irrep{8},\irrep{8},\irrep{1})$
\end{mathout}
Decompose the tensor products $(\irrep{3},\,\irrep{2}){\otimes}(\irrepbar{3},\,\irrep{1})$ of $\SU3{\otimes}\SU2$:
\begin{mathin}
DecomposeProduct[\linebreak ProductIrrep[Irrep[SU3][3],Irrep[SU2][2]],\linebreak ProductIrrep[Irrep[SU3][Bar[3]],Irrep[SU2][1]]]
\end{mathin}
\begin{mathout}
$(\irrep{1},\irrep{2})+(\irrep{8},\irrep{2})$
\end{mathout}
Decompose the tensor product $\irrep{4}{\otimes}\irrep{4}{\otimes}\irrep{6}{\otimes}\irrep{15}$ of \SU4:
\begin{mathin}
DecomposeProduct[Irrep[SU4][4],Irrep[SU4][4],Irrep[SU4][6],Irrep[SU4][15]]
\end{mathin}
\begin{mathout}
$2(\irrep{1})+7(\irrep{15})+4(\irrep[1]{20})+\irrep{35}+5(\irrep{45})+3(\irrepbar{45})+3(\irrep{84})+2(\irrep{175})+\irrep{256}$
\end{mathout}
Decompose the tensor product $\irrepbar{10}{\otimes}\irrep{24}{\otimes}\irrep{45}$ of \SU5:
\begin{mathin}
DecomposeProduct[Irrep[SU5][Bar[10]],Irrep[SU5][24],Irrep[SU5][45]]
\end{mathin}
\begin{mathout}
$3(\irrepbar{5})+6(\irrepbar{45})+3(\irrepbar{50})+5(\irrepbar{70}) + 2(\irrepbar{105}) + \irrepbar[2]{175} + \newline
6(\irrepbar{280}) + 2(\irrepbar[1]{280}) + \irrepbar{420} + \ensuremath{\irrepbar[1]{450}} + 3(\irrepbar{480}) + 2(\irrepbar{720}) + \irrepbar{1120} + \irrepbar{2520}$
\end{mathout}
\subsection{Decomposition to Subalgebras}
\definition{
\com{DecomposeIrrep[\args{irrep},\,\args{subalgebra}]} & Decomposes \args{irrep} to the specified \args{subalgebra}.\\
\com{DecomposeIrrep[\args{pirrep},\,\args{subalgebra},\,\args{pos}]} & Decomposes \args{pirrep} at position \args{pos} of the product irrep \args{pirrep}.\\
}{Decompose irreps and product irreps.}
Decompose the \irrepbar{10} of \SU5 to $\SU3{\otimes}\SU2{\otimes}\U1$:
\begin{mathin}
DecomposeIrrep[Irrep[SU5][Bar[10]],ProductAlgebra[SU3,SU2,U1]]
\end{mathin}
\begin{mathout}
$(\irrep{1},\irrep{1})(6)+(\irrep{3},\irrep{1})(-4)+(\irrepbar{3},\irrep{2})(1)$
\end{mathout}
\pagebreak
\enlargethispage{10pt}
Decompose the \irrep{10} and the \irrepbar{5} of \SU5 to $\SU3{\otimes}\SU2{\otimes}\U1$ (\com{DecomposeIrrep} is \com{Listable}):
\begin{mathin}
DecomposeIrrep[\{Irrep[SU5][10],Irrep[SU5][Bar[5]]\},ProductAlgebra[SU3,SU2,U1]]
\end{mathin}
\begin{mathout}
$\{(\irrepbar{3},\irrep{1})(4)+(\irrep{3},\irrep{2})(-1)+(\irrep{1},\irrep{1})(-6),\,(\irrepbar{3},\irrep{1})(-2)+(\irrep{1},\irrep{2})(3)\}$
\end{mathout}
Decompose the \irrep{16} of \SO{10} to $\SU5{\otimes}\U1$:
\begin{mathin}
DecomposeIrrep[Irrep[SO10][16],ProductAlgebra[SU5,U1]]
\end{mathin}
\begin{mathout}
$(\irrep{1})(-5)+(\irrepbar{5})(3)+(\irrep{10})(-1)$
\end{mathout}
Decompose the \irrep{27} of \E6 to $\SU3{\otimes}\SU3{\otimes}\SU3$:
\begin{mathin}
DecomposeIrrep[Irrep[E6][27],ProductAlgebra[SU3,SU3,SU3]]
\end{mathin}
\begin{mathout}
$(\irrep{3},\irrep{1},\irrep{3})+(\irrep{1},\irrep{3},\irrepbar{3})+(\irrepbar{3},\irrepbar{3},\irrep{1})$
\end{mathout}
Decompose the \SU3 irrep \irrep{3} in $(\irrep{24},\irrep{3})(-3)$ of $\SU5{\otimes}\SU3{\otimes}\U1$ to
$\SU2{\otimes}\text{U}'(1)$,\newline i.e., $\SU5{\otimes}\SU3{\otimes}\U1\to\SU5{\otimes}\SU2{\otimes}\text{U}'(1){\otimes}\U1$:
\begin{mathin}
DecomposeIrrep[ProductIrrep[Irrep[SU5][24],Irrep[SU3][3],Irrep[U1][-3]], ProductAlgebra[SU2,U1],2]
\end{mathin}
\begin{mathout}
$(\irrep{24},\irrep{1})(-2)(-3)+(\irrep{24},\irrep{2})(1)(-3)$
\end{mathout}
The same decomposition as above displayed as branching rule:
\begin{mathin}
IrrepRule[\slot,DecomposeIrrep[\slot,ProductAlgebra[SU2,U1],2]]\&@
ProductIrrep[Irrep[SU5][24],Irrep[SU3][3],Irrep[U1][-3]]
\end{mathin}
\begin{mathout}
$(\irrep{24},\irrep{3})(-3)\to(\irrep{24},\irrep{1})(-2)(-3)+(\irrep{24},\irrep{2})(1)(-3)$
\end{mathout}
Branching rules for all totally antisymmetric irreps, so-called basic irreps, of \SU6 to $\SU3{\otimes}\SU3{\otimes}\U1$:
\begin{mathin}
IrrepRule[\slot,DecomposeIrrep[\slot,ProductAlgebra[SU3,SU3,U1]]]\&/@\,\newline BasicIrreps[SU6]//TableForm
\end{mathin}
\begin{mathout}\hangindent=0ex%
$\irrep{6}\rightarrow(\irrep{3},\irrep{1})(1)+(\irrep{1},\irrep{3})(-1)$\newline
$\irrep{15}\rightarrow(\irrepbar{3},\irrep{1})(2)+(\irrep{1},\irrepbar{3})(-2)+(\irrep{3},\irrep{3})(0)$\newline
$\irrep{20}\rightarrow(\irrep{1},\irrep{1})(3)+(\irrep{1},\irrep{1})(-3)+(\irrep{3},\irrepbar{3})(-1)+(\irrepbar{3},\irrep{3})(1)$\newline
$\irrepbar{15}\rightarrow(\irrep{3},\irrep{1})(-2)+(\irrep{1},\irrep{3})(2)+(\irrepbar{3},\irrepbar{3})(0)$\newline
$\irrepbar{6}\rightarrow(\irrepbar{3},\irrep{1})(-1)+(\irrep{1},\irrepbar{3})(1)$\newline
\end{mathout}
\vspace{-15pt}
\subsection{Young Tableaux}
\Yautoscale0
\Yboxdim13pt
The irreps of \SU{N} are in one-to-one correspondence with Young tableaux, which can be displayed by \com{YoungTableau}.
\definition{
\com{YoungTableau[\args{irrep}]} & Displays the Young tableau associated with an \SU{N} \args{irrep}.\\
}{Young tableaux.}
Young tableau of the \irrep{720} of \SU5:
\begin{mathin}
YoungTableau[Irrep[A][1,2,0,1]]
\end{mathin}
\begin{mathout}\label{out:YoungTableau720SU5}
\large\yng(4,3,1,1)
\end{mathout}
Display Young tableaux of \SU4 irreps with at most one column of each height, i.e., with all Dynkin labels 0 or 1:
\begin{mathin}
Row[Row[\{\textcolor{DarkGreen}{\#},"{:}\ ",YoungTableau[\textcolor{DarkGreen}{\#}]\}]\&/@\newline
SortBy[Irrep[A]@@@Tuples[\{0,1\},3],Dim],Spacer[10]]
\end{mathin}
\newcommand{\irrepandtableau}[2]{#1{:}\:#2\quad}
\begin{mathout}
$\irrepandtableau{\irrep{1}}{\bullet}
\irrepandtableau{\irrepbar{4}}{\yng(1,1,1)}
\irrepandtableau{\irrep{4}}{\yng(1)}
\irrepandtableau{\irrep{6}}{\yng(1,1)}
\irrepandtableau{\irrep{15}}{\yng(2,1,1)}
\irrepandtableau{\irrep{20}}{\yng(2,2,1)}
\irrepandtableau{\irrepbar{20}}{\yng(2,1)}
\irrepandtableau{\irrep{64}}{\yng(3,2,1)}$
\end{mathout}
\subsubsection{\SU{N}}
\vspace{-10pt}
\enlargethispage{10pt}
\begin{longtable}{rcl}
\caption{\label{tab:SU2TensorProducts}SU(2) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SU(2) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrep{2}\times\irrep{2}$ & = & $\irrep{1}+\irrep{3}$\\
$\irrep{3}\times\irrep{2}$ & = & $\irrep{2}+\irrep{4}$\\
$\irrep{3}\times\irrep{3}$ & = & $\irrep{1}+\irrep{3}+\irrep{5}$\\
$\irrep{4}\times\irrep{2}$ & = & $\irrep{3}+\irrep{5}$\\
$\irrep{4}\times\irrep{3}$ & = & $\irrep{2}+\irrep{4}+\irrep{6}$\\
$\irrep{4}\times\irrep{4}$ & = & $\irrep{1}+\irrep{3}+\irrep{5}+\irrep{7}$\\
$\irrep{5}\times\irrep{2}$ & = & $\irrep{4}+\irrep{6}$\\
$\irrep{5}\times\irrep{3}$ & = & $\irrep{3}+\irrep{5}+\irrep{7}$\\
$\irrep{5}\times\irrep{4}$ & = & $\irrep{2}+\irrep{4}+\irrep{6}+\irrep{8}$\\
$\irrep{5}\times\irrep{5}$ & = & $\irrep{1}+\irrep{3}+\irrep{5}+\irrep{7}+\irrep{9}$\\
$\irrep{6}\times\irrep{2}$ & = & $\irrep{5}+\irrep{7}$\\
$\irrep{6}\times\irrep{3}$ & = & $\irrep{4}+\irrep{6}+\irrep{8}$\\
$\irrep{6}\times\irrep{4}$ & = & $\irrep{3}+\irrep{5}+\irrep{7}+\irrep{9}$\\
$\irrep{6}\times\irrep{5}$ & = & $\irrep{2}+\irrep{4}+\irrep{6}+\irrep{8}+\irrep{10}$\\
$\irrep{6}\times\irrep{6}$ & = & $\irrep{1}+\irrep{3}+\irrep{5}+\irrep{7}+\irrep{9}+\irrep{11}$\\
$\irrep{7}\times\irrep{2}$ & = & $\irrep{6}+\irrep{8}$\\
$\irrep{7}\times\irrep{3}$ & = & $\irrep{5}+\irrep{7}+\irrep{9}$\\
$\irrep{7}\times\irrep{4}$ & = & $\irrep{4}+\irrep{6}+\irrep{8}+\irrep{10}$\\
$\irrep{7}\times\irrep{5}$ & = & $\irrep{3}+\irrep{5}+\irrep{7}+\irrep{9}+\irrep{11}$\\
$\irrep{7}\times\irrep{6}$ & = & $\irrep{2}+\irrep{4}+\irrep{6}+\irrep{8}+\irrep{10}+\irrep{12}$\\
$\irrep{7}\times\irrep{7}$ & = & $\irrep{1}+\irrep{3}+\irrep{5}+\irrep{7}+\irrep{9}+\irrep{11}+\irrep{13}$\\
$\irrep{8}\times\irrep{2}$ & = & $\irrep{7}+\irrep{9}$\\
$\irrep{8}\times\irrep{3}$ & = & $\irrep{6}+\irrep{8}+\irrep{10}$\\
$\irrep{8}\times\irrep{4}$ & = & $\irrep{5}+\irrep{7}+\irrep{9}+\irrep{11}$\\
$\irrep{8}\times\irrep{5}$ & = & $\irrep{4}+\irrep{6}+\irrep{8}+\irrep{10}+\irrep{12}$\\
$\irrep{8}\times\irrep{6}$ & = & $\irrep{3}+\irrep{5}+\irrep{7}+\irrep{9}+\irrep{11}+\irrep{13}$\\
$\irrep{8}\times\irrep{7}$ & = & $\irrep{2}+\irrep{4}+\irrep{6}+\irrep{8}+\irrep{10}+\irrep{12}+\irrep{14}$\\
$\irrep{8}\times\irrep{8}$ & = & $\irrep{1}+\irrep{3}+\irrep{5}+\irrep{7}+\irrep{9}+\irrep{11}+\irrep{13}+\irrep{15}$\\
$\irrep{9}\times\irrep{2}$ & = & $\irrep{8}+\irrep{10}$\\
$\irrep{9}\times\irrep{3}$ & = & $\irrep{7}+\irrep{9}+\irrep{11}$\\
$\irrep{9}\times\irrep{4}$ & = & $\irrep{6}+\irrep{8}+\irrep{10}+\irrep{12}$\\
$\irrep{9}\times\irrep{5}$ & = & $\irrep{5}+\irrep{7}+\irrep{9}+\irrep{11}+\irrep{13}$\\
$\irrep{9}\times\irrep{6}$ & = & $\irrep{4}+\irrep{6}+\irrep{8}+\irrep{10}+\irrep{12}+\irrep{14}$\\
$\irrep{9}\times\irrep{7}$ & = & $\irrep{3}+\irrep{5}+\irrep{7}+\irrep{9}+\irrep{11}+\irrep{13}+\irrep{15}$\\
$\irrep{9}\times\irrep{8}$ & = & $\irrep{2}+\irrep{4}+\irrep{6}+\irrep{8}+\irrep{10}+\irrep{12}+\irrep{14}+\irrep{16}$\\
$\irrep{9}\times\irrep{9}$ & = & $\irrep{1}+\irrep{3}+\irrep{5}+\irrep{7}+\irrep{9}+\irrep{11}+\irrep{13}+\irrep{15}+\irrep{17}$\\
$\irrep{10}\times\irrep{2}$ & = & $\irrep{9}+\irrep{11}$\\
$\irrep{10}\times\irrep{3}$ & = & $\irrep{8}+\irrep{10}+\irrep{12}$\\
$\irrep{10}\times\irrep{4}$ & = & $\irrep{7}+\irrep{9}+\irrep{11}+\irrep{13}$\\
$\irrep{10}\times\irrep{5}$ & = & $\irrep{6}+\irrep{8}+\irrep{10}+\irrep{12}+\irrep{14}$\\
$\irrep{10}\times\irrep{6}$ & = & $\irrep{5}+\irrep{7}+\irrep{9}+\irrep{11}+\irrep{13}+\irrep{15}$\\
$\irrep{10}\times\irrep{7}$ & = & $\irrep{4}+\irrep{6}+\irrep{8}+\irrep{10}+\irrep{12}+\irrep{14}+\irrep{16}$\\
$\irrep{10}\times\irrep{8}$ & = & $\irrep{3}+\irrep{5}+\irrep{7}+\irrep{9}+\irrep{11}+\irrep{13}+\irrep{15}+\irrep{17}$\\
$\irrep{10}\times\irrep{9}$ & = & $\irrep{2}+\irrep{4}+\irrep{6}+\irrep{8}+\irrep{10}+\irrep{12}+\irrep{14}+\irrep{16}+\irrep{18}$\\
$\irrep{10}\times\irrep{10}$ & = & $\irrep{1}+\irrep{3}+\irrep{5}+\irrep{7}+\irrep{9}+\irrep{11}+\irrep{13}+\irrep{15}+\irrep{17}+\irrep{19}$\\
\end{longtable}
\newpage
\begin{longtable}{rcl}
\caption{\label{tab:SU3TensorProducts}SU(3) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SU(3) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrepbar{3}\times\irrep{3}$ & = & $\irrep{1}+\irrep{8}$\\
$\irrep{3}\times\irrep{3}$ & = & $\irrepbar{3}+\irrep{6}$\\
$\irrepbar{6}\times\irrep{3}$ & = & $\irrepbar{3}+\irrepbar{15}$\\
$\irrep{6}\times\irrep{3}$ & = & $\irrep{8}+\irrep{10}$\\
$\irrepbar{6}\times\irrepbar{6}$ & = & $\irrep{6}+\irrepbar{15}+\irrepbar[1]{15}$\\
$\irrep{6}\times\irrepbar{6}$ & = & $\irrep{1}+\irrep{8}+\irrep{27}$\\
$\irrep{8}\times\irrep{3}$ & = & $\irrep{3}+\irrepbar{6}+\irrep{15}$\\
$\irrep{8}\times\irrepbar{6}$ & = & $\irrep{3}+\irrepbar{6}+\irrep{15}+\irrep{24}$\\
$\irrep{8}\times\irrep{8}$ & = & $\irrep{1}+2(\irrep{8})+\irrep{10}+\irrepbar{10}+\irrep{27}$\\
$\irrepbar{10}\times\irrep{3}$ & = & $\irrepbar{6}+\irrep{24}$\\
$\irrep{10}\times\irrep{3}$ & = & $\irrep{15}+\irrep[1]{15}$\\
$\irrepbar{10}\times\irrepbar{6}$ & = & $\irrep{15}+\irrep{21}+\irrep{24}$\\
$\irrep{10}\times\irrepbar{6}$ & = & $\irrep{3}+\irrep{15}+\irrep{42}$\\
$\irrep{10}\times\irrep{8}$ & = & $\irrep{8}+\irrep{10}+\irrep{27}+\irrep{35}$\\
$\irrepbar{10}\times\irrep{10}$ & = & $\irrep{1}+\irrep{8}+\irrep{27}+\irrep{64}$\\
$\irrep{10}\times\irrep{10}$ & = & $\irrepbar{10}+\irrep{27}+\irrep{28}+\irrep{35}$\\
$\irrepbar[1]{15}\times\irrep{3}$ & = & $\irrepbar{10}+\irrepbar{35}$\\
$\irrepbar{15}\times\irrep{3}$ & = & $\irrep{8}+\irrepbar{10}+\irrep{27}$\\
$\irrep{15}\times\irrep{3}$ & = & $\irrep{6}+\irrepbar{15}+\irrepbar{24}$\\
$\irrep[1]{15}\times\irrep{3}$ & = & $\irrepbar{21}+\irrepbar{24}$\\
$\irrepbar[1]{15}\times\irrepbar{6}$ & = & $\irrep{27}+\irrepbar{28}+\irrepbar{35}$\\
$\irrepbar{15}\times\irrepbar{6}$ & = & $\irrep{8}+\irrep{10}+\irrepbar{10}+\irrep{27}+\irrepbar{35}$\\
$\irrep{15}\times\irrepbar{6}$ & = & $\irrepbar{3}+\irrep{6}+\irrepbar{15}+\irrepbar{24}+\irrepbar{42}$\\
$\irrep[1]{15}\times\irrepbar{6}$ & = & $\irrep{6}+\irrepbar{24}+\irrepbar{60}$\\
$\irrep{15}\times\irrep{8}$ & = & $\irrep{3}+\irrepbar{6}+2(\irrep{15})+\irrep[1]{15}+\irrep{24}+\irrep{42}$\\
$\irrep[1]{15}\times\irrep{8}$ & = & $\irrep{15}+\irrep[1]{15}+\irrep{42}+\irrep{48}$\\
$\irrepbar[1]{15}\times\irrep{10}$ & = & $\irrepbar{3}+\irrepbar{15}+\irrepbar{42}+\irrepbar{90}$\\
$\irrepbar{15}\times\irrep{10}$ & = & $\irrepbar{3}+\irrep{6}+\irrepbar{15}+\irrepbar{24}+\irrepbar{42}+\irrepbar{60}$\\
$\irrep{15}\times\irrep{10}$ & = & $\irrepbar{6}+\irrep{15}+\irrep[1]{15}+\irrep{24}+\irrep{42}+\irrep{48}$\\
$\irrep[1]{15}\times\irrep{10}$ & = & $\irrep{24}+\irrep{36}+\irrep{42}+\irrep{48}$\\
$\irrepbar[1]{15}\times\irrep{15}$ & = & $\irrep{8}+\irrepbar{10}+\irrep{27}+\irrepbar{35}+\irrep{64}+\irrepbar{81}$\\
$\irrepbar[1]{15}\times\irrep[1]{15}$ & = & $\irrep{1}+\irrep{8}+\irrep{27}+\irrep{64}+\irrep{125}$\\
$\irrepbar{15}\times\irrep{15}$ & = & $\irrep{1}+2(\irrep{8})+\irrep{10}+\irrepbar{10}+2(\irrep{27})+\irrep{35}+\irrepbar{35}+\irrep{64}$\\
$\irrep{15}\times\irrep{15}$ & = & $\irrepbar{3}+\irrep{6}+2(\irrepbar{15})+\irrepbar[1]{15}+\irrepbar{21}+2(\irrepbar{24})+\irrepbar{42}+\irrepbar{60}$\\
$\irrep[1]{15}\times\irrep{15}$ & = & $\irrepbar{15}+\irrepbar{21}+\irrepbar{24}+\irrepbar{42}+\irrepbar{60}+\irrepbar{63}$\\
$\irrep[1]{15}\times\irrep[1]{15}$ & = & $\irrepbar[1]{15}+\irrepbar{42}+\irrepbar{45}+\irrepbar{60}+\irrepbar{63}$\\
$\irrep{21}\times\irrep{3}$ & = & $\irrepbar[1]{15}+\irrepbar{48}$\\
$\irrepbar{21}\times\irrep{3}$ & = & $\irrep{28}+\irrep{35}$\\
$\irrep{21}\times\irrepbar{6}$ & = & $\irrepbar{36}+\irrepbar{42}+\irrepbar{48}$\\
$\irrepbar{21}\times\irrepbar{6}$ & = & $\irrep{10}+\irrep{35}+\irrep{81}$\\
$\irrep{21}\times\irrep{8}$ & = & $\irrep{21}+\irrep{24}+\irrep{60}+\irrep{63}$\\
$\irrep{21}\times\irrep{10}$ & = & $\irrepbar{6}+\irrep{24}+\irrep{60}+\irrep{120}$\\
$\irrepbar{21}\times\irrep{10}$ & = & $\irrepbar{42}+\irrepbar{45}+\irrepbar{60}+\irrepbar{63}$\\
$\irrep{21}\times\irrep{15}$ & = & $\irrepbar{15}+\irrepbar[1]{15}+\irrepbar{42}+\irrepbar{48}+\irrepbar{90}+\irrepbar{105}$\\
$\irrep{21}\times\irrep[1]{15}$ & = & $\irrepbar{3}+\irrepbar{15}+\irrepbar{42}+\irrepbar{90}+\irrepbar{165}$\\
$\irrepbar{21}\times\irrep{15}$ & = & $\irrep{27}+\irrep{28}+\irrep{35}+\irrep{64}+\irrep{80}+\irrep{81}$\\
$\irrepbar{21}\times\irrep[1]{15}$ & = & $\irrepbar{35}+\irrep{55}+\irrep{64}+\irrep{80}+\irrep{81}$\\
$\irrep{21}\times\irrep{21}$ & = & $\irrepbar{21}+\irrepbar{60}+\irrepbar{66}+\irrepbar{90}+\irrepbar{99}+\irrepbar{105}$\\
$\irrepbar{21}\times\irrep{21}$ & = & $\irrep{1}+\irrep{8}+\irrep{27}+\irrep{64}+\irrep{125}+\irrep{216}$\\
$\irrep{24}\times\irrep{3}$ & = & $\irrepbar{15}+\irrepbar[1]{15}+\irrepbar{42}$\\
$\irrepbar{24}\times\irrep{3}$ & = & $\irrep{10}+\irrep{27}+\irrep{35}$\\
$\irrep{24}\times\irrepbar{6}$ & = & $\irrepbar{15}+\irrepbar[1]{15}+\irrepbar{24}+\irrepbar{42}+\irrepbar{48}$\\
$\irrepbar{24}\times\irrepbar{6}$ & = & $\irrep{8}+\irrep{10}+\irrep{27}+\irrep{35}+\irrep{64}$\\
$\irrep{24}\times\irrep{8}$ & = & $\irrepbar{6}+\irrep{15}+\irrep{21}+2(\irrep{24})+\irrep{42}+\irrep{60}$\\
$\irrep{24}\times\irrep{10}$ & = & $\irrep{3}+\irrepbar{6}+\irrep{15}+\irrep{24}+\irrep{42}+\irrep{60}+\irrep{90}$\\
$\irrepbar{24}\times\irrep{10}$ & = & $\irrepbar{15}+\irrepbar[1]{15}+\irrepbar{21}+\irrepbar{24}+\irrepbar{42}+\irrepbar{60}+\irrepbar{63}$\\
$\irrep{24}\times\irrep{15}$ & = & $\irrepbar{3}+\irrep{6}+2(\irrepbar{15})+\irrepbar[1]{15}+\irrepbar{24}+2(\irrepbar{42})+\irrepbar{48}+\irrepbar{60}+\irrepbar{90}$\\
$\irrep{24}\times\irrep[1]{15}$ & = & $\irrepbar{3}+\irrep{6}+\irrepbar{15}+\irrepbar{24}+\irrepbar{42}+\irrepbar{60}+\irrepbar{90}+\irrepbar{120}$\\
$\irrepbar{24}\times\irrep{15}$ & = & $\irrep{8}+\irrep{10}+\irrepbar{10}+2(\irrep{27})+\irrep{28}+2(\irrep{35})+\irrepbar{35}+\irrep{64}+\irrep{81}$\\
$\irrepbar{24}\times\irrep[1]{15}$ & = & $\irrepbar{10}+\irrep{27}+\irrep{28}+\irrep{35}+\irrepbar{35}+\irrep{64}+\irrep{80}+\irrep{81}$\\
$\irrep{24}\times\irrep{21}$ & = & $\irrepbar{24}+\irrepbar{36}+\irrepbar{42}+\irrepbar{48}+\irrepbar{60}+\irrepbar{90}+\irrepbar{99}+\irrepbar{105}$\\
$\irrepbar{24}\times\irrep{21}$ & = & $\irrep{8}+\irrepbar{10}+\irrep{27}+\irrepbar{35}+\irrep{64}+\irrepbar{81}+\irrep{125}+\irrepbar{154}$\\
$\irrep{24}\times\irrep{24}$ & = & $\irrep{6}+\irrepbar{15}+\irrepbar[1]{15}+\irrepbar{21}+2(\irrepbar{24})+\irrepbar{36}+2(\irrepbar{42})+2(\irrepbar{48})+\irrepbar{60}+\irrepbar{90}+\irrepbar{105}$\\
$\irrepbar{24}\times\irrep{24}$ & = & $\irrep{1}+2(\irrep{8})+\irrep{10}+\irrepbar{10}+2(\irrep{27})+\irrep{35}+\irrepbar{35}+2(\irrep{64})+\irrep{81}+\irrepbar{81}+\irrep{125}$\\
$\irrep{27}\times\irrep{3}$ & = & $\irrep{15}+\irrep{24}+\irrep{42}$\\
$\irrep{27}\times\irrepbar{6}$ & = & $\irrepbar{6}+\irrep{15}+\irrep[1]{15}+\irrep{24}+\irrep{42}+\irrep{60}$\\
$\irrep{27}\times\irrep{8}$ & = & $\irrep{8}+\irrep{10}+\irrepbar{10}+2(\irrep{27})+\irrep{35}+\irrepbar{35}+\irrep{64}$\\
$\irrep{27}\times\irrep{10}$ & = & $\irrep{8}+\irrep{10}+\irrepbar{10}+\irrep{27}+\irrep{35}+\irrepbar{35}+\irrep{64}+\irrep{81}$\\
$\irrep{27}\times\irrep{15}$ & = & $\irrep{3}+\irrepbar{6}+2(\irrep{15})+\irrep[1]{15}+\irrep{21}+2(\irrep{24})+2(\irrep{42})+\irrep{48}+\irrep{60}+\irrep{90}$\\
$\irrep{27}\times\irrep[1]{15}$ & = & $\irrepbar{6}+\irrep{15}+\irrep[1]{15}+\irrep{24}+\irrep{42}+\irrep{48}+\irrep{60}+\irrep{90}+\irrep{105}$\\
$\irrep{27}\times\irrep{21}$ & = & $\irrep{15}+\irrep{21}+\irrep{24}+\irrep{42}+\irrep{60}+\irrep{63}+\irrep{90}+\irrep{120}+\irrep{132}$\\
$\irrep{27}\times\irrep{24}$ & = & $\irrep{3}+\irrepbar{6}+2(\irrep{15})+\irrep[1]{15}+\irrep{21}+2(\irrep{24})+2(\irrep{42})+\irrep{48}+2(\irrep{60})+\irrep{63}+\irrep{90}+\irrep{120}$\\
$\irrep{27}\times\irrep{27}$ & = & $\irrep{1}+2(\irrep{8})+\irrep{10}+\irrepbar{10}+3(\irrep{27})+\irrep{28}+\irrepbar{28}+2(\irrep{35})+2(\irrepbar{35})+2(\irrep{64})+\irrep{81}+\irrepbar{81}+\irrep{125}$\\
$\irrepbar{28}\times\irrep{3}$ & = & $\irrep{21}+\irrep{63}$\\
$\irrep{28}\times\irrep{3}$ & = & $\irrep{36}+\irrep{48}$\\
$\irrepbar{28}\times\irrepbar{6}$ & = & $\irrep{45}+\irrep{60}+\irrep{63}$\\
$\irrep{28}\times\irrepbar{6}$ & = & $\irrep[1]{15}+\irrep{48}+\irrep{105}$\\
$\irrep{28}\times\irrep{8}$ & = & $\irrep{28}+\irrep{35}+\irrep{80}+\irrep{81}$\\
$\irrepbar{28}\times\irrep{10}$ & = & $\irrepbar{10}+\irrepbar{35}+\irrepbar{81}+\irrepbar{154}$\\
$\irrep{28}\times\irrep{10}$ & = & $\irrep{55}+\irrep{64}+\irrep{80}+\irrep{81}$\\
$\irrepbar{28}\times\irrep{15}$ & = & $\irrep{21}+\irrep{24}+\irrep{60}+\irrep{63}+\irrep{120}+\irrep{132}$\\
$\irrepbar{28}\times\irrep[1]{15}$ & = & $\irrepbar{6}+\irrep{24}+\irrep{60}+\irrep{120}+\irrep{210}$\\
$\irrep{28}\times\irrep{15}$ & = & $\irrep{36}+\irrep{42}+\irrep{48}+\irrep{90}+\irrep{99}+\irrep{105}$\\
$\irrep{28}\times\irrep[1]{15}$ & = & $\irrep{60}+\irrep{66}+\irrep{90}+\irrep{99}+\irrep{105}$\\
$\irrepbar{28}\times\irrep{21}$ & = & $\irrep{48}+\irrep{78}+\irrep{90}+\irrep{120}+\irrep[1]{120}+\irrep{132}$\\
$\irrep{28}\times\irrep{21}$ & = & $\irrep{3}+\irrep{15}+\irrep{42}+\irrep{90}+\irrep{165}+\irrep{273}$\\
$\irrepbar{28}\times\irrep{24}$ & = & $\irrep{42}+\irrep{45}+\irrep{60}+\irrep{63}+\irrep{90}+\irrep{120}+\irrep[1]{120}+\irrep{132}$\\
$\irrep{28}\times\irrep{24}$ & = & $\irrep{15}+\irrep[1]{15}+\irrep{42}+\irrep{48}+\irrep{90}+\irrep{105}+\irrep{165}+\irrep{192}$\\
$\irrep{28}\times\irrep{27}$ & = & $\irrep{27}+\irrep{28}+\irrep{35}+\irrep{64}+\irrep{80}+\irrep{81}+\irrep{125}+\irrep{154}+\irrep{162}$\\
$\irrepbar{28}\times\irrep{28}$ & = & $\irrep{1}+\irrep{8}+\irrep{27}+\irrep{64}+\irrep{125}+\irrep{216}+\irrep{343}$\\
$\irrep{28}\times\irrep{28}$ & = & $\irrepbar{28}+\irrepbar{81}+\irrep{91}+\irrep{125}+\irrep{143}+\irrep{154}+\irrep{162}$\\
\end{longtable}
\newpage
\enlargethispage{10pt}
\begin{longtable}{rcl}
\caption{\label{tab:SU4TensorProducts}SU(4) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SU(4) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrepbar{4}\times\irrep{4}$ & = & $\irrep{1}+\irrep{15}$\\
$\irrep{4}\times\irrep{4}$ & = & $\irrep{6}+\irrep{10}$\\
$\irrep{6}\times\irrep{4}$ & = & $\irrepbar{4}+\irrepbar{20}$\\
$\irrep{6}\times\irrep{6}$ & = & $\irrep{1}+\irrep{15}+\irrep[1]{20}$\\
$\irrepbar{10}\times\irrep{4}$ & = & $\irrepbar{4}+\irrepbar{36}$\\
$\irrep{10}\times\irrep{4}$ & = & $\irrepbar{20}+\irrepbar[2]{20}$\\
$\irrep{10}\times\irrep{6}$ & = & $\irrep{15}+\irrep{45}$\\
$\irrepbar{10}\times\irrep{10}$ & = & $\irrep{1}+\irrep{15}+\irrep{84}$\\
$\irrep{10}\times\irrep{10}$ & = & $\irrep[1]{20}+\irrep{35}+\irrep{45}$\\
$\irrep{15}\times\irrep{4}$ & = & $\irrep{4}+\irrep{20}+\irrep{36}$\\
$\irrep{15}\times\irrep{6}$ & = & $\irrep{6}+\irrep{10}+\irrepbar{10}+\irrep{64}$\\
$\irrep{15}\times\irrep{10}$ & = & $\irrep{6}+\irrep{10}+\irrep{64}+\irrep{70}$\\
$\irrep{15}\times\irrep{15}$ & = & $\irrep{1}+2(\irrep{15})+\irrep[1]{20}+\irrep{45}+\irrepbar{45}+\irrep{84}$\\
$\irrep[2]{20}\times\irrep{4}$ & = & $\irrepbar{10}+\irrepbar{70}$\\
$\irrep{20}\times\irrep{4}$ & = & $\irrep{6}+\irrepbar{10}+\irrep{64}$\\
$\irrep[1]{20}\times\irrep{4}$ & = & $\irrep{20}+\irrep{60}$\\
$\irrepbar{20}\times\irrep{4}$ & = & $\irrep{15}+\irrep[1]{20}+\irrep{45}$\\
$\irrepbar[2]{20}\times\irrep{4}$ & = & $\irrep{35}+\irrep{45}$\\
$\irrep[2]{20}\times\irrep{6}$ & = & $\irrepbar{36}+\irrepbar[1]{84}$\\
$\irrep{20}\times\irrep{6}$ & = & $\irrepbar{4}+\irrepbar{20}+\irrepbar{36}+\irrepbar{60}$\\
$\irrep[1]{20}\times\irrep{6}$ & = & $\irrep{6}+\irrep{50}+\irrep{64}$\\
$\irrep[2]{20}\times\irrep{10}$ & = & $\irrepbar{4}+\irrepbar{36}+\irrepbar{160}$\\
$\irrep{20}\times\irrep{10}$ & = & $\irrepbar{4}+\irrepbar{20}+\irrepbar{36}+\irrepbar{140}$\\
$\irrep[1]{20}\times\irrep{10}$ & = & $\irrepbar{10}+\irrep{64}+\irrep{126}$\\
$\irrepbar{20}\times\irrep{10}$ & = & $\irrep{20}+\irrep{36}+\irrep{60}+\irrep[1]{84}$\\
$\irrepbar[2]{20}\times\irrep{10}$ & = & $\irrep{56}+\irrep{60}+\irrep[1]{84}$\\
$\irrep[2]{20}\times\irrep{15}$ & = & $\irrep{20}+\irrep[2]{20}+\irrep{120}+\irrep{140}$\\
$\irrep{20}\times\irrep{15}$ & = & $\irrep{4}+2(\irrep{20})+\irrep[2]{20}+\irrep{36}+\irrep{60}+\irrep{140}$\\
$\irrep[1]{20}\times\irrep{15}$ & = & $\irrep{15}+\irrep[1]{20}+\irrep{45}+\irrepbar{45}+\irrep{175}$\\
$\irrep[2]{20}\times\irrep[2]{20}$ & = & $\irrep{50}+\irrepbar[2]{84}+\irrepbar{126}+\irrepbar[2]{140}$\\
$\irrep[2]{20}\times\irrep{20}$ & = & $\irrep{64}+\irrepbar{70}+\irrepbar{126}+\irrepbar[2]{140}$\\
$\irrep[2]{20}\times\irrep[1]{20}$ & = & $\irrep{36}+\irrep{140}+\irrep{224}$\\
$\irrep{20}\times\irrep{20}$ & = & $\irrep{6}+\irrep{10}+\irrepbar{10}+\irrep{50}+2(\irrep{64})+\irrepbar{70}+\irrepbar{126}$\\
$\irrep[1]{20}\times\irrep{20}$ & = & $\irrep{4}+\irrep{20}+\irrep{36}+\irrep{60}+\irrep{140}+\irrep[1]{140}$\\
$\irrep[1]{20}\times\irrep[1]{20}$ & = & $\irrep{1}+\irrep{15}+\irrep[1]{20}+\irrep{84}+\irrep{105}+\irrep{175}$\\
$\irrepbar{20}\times\irrep{20}$ & = & $\irrep{1}+2(\irrep{15})+\irrep[1]{20}+\irrep{45}+\irrepbar{45}+\irrep{84}+\irrep{175}$\\
$\irrepbar[2]{20}\times\irrep[2]{20}$ & = & $\irrep{1}+\irrep{15}+\irrep{84}+\irrep[1]{300}$\\
$\irrepbar[2]{20}\times\irrep{20}$ & = & $\irrep{15}+\irrep{45}+\irrep{84}+\irrep{256}$\\
$\irrepbar{35}\times\irrep{4}$ & = & $\irrep[2]{20}+\irrep{120}$\\
$\irrep{35}\times\irrep{4}$ & = & $\irrep{56}+\irrep[1]{84}$\\
$\irrep{35}\times\irrep{6}$ & = & $\irrep{70}+\irrep[2]{140}$\\
$\irrepbar{35}\times\irrep{10}$ & = & $\irrepbar{10}+\irrepbar{70}+\irrepbar{270}$\\
$\irrep{35}\times\irrep{10}$ & = & $\irrep[2]{84}+\irrep{126}+\irrep[2]{140}$\\
$\irrep{35}\times\irrep{15}$ & = & $\irrep{35}+\irrep{45}+\irrep{189}+\irrep{256}$\\
$\irrepbar{35}\times\irrep[2]{20}$ & = & $\irrep[1]{120}+\irrep[1]{140}+\irrep{216}+\irrep{224}$\\
$\irrepbar{35}\times\irrep{20}$ & = & $\irrep{120}+\irrep{140}+\irrep{216}+\irrep{224}$\\
$\irrep{35}\times\irrep[2]{20}$ & = & $\irrep{4}+\irrep{36}+\irrep{160}+\irrep{500}$\\
$\irrep{35}\times\irrep{20}$ & = & $\irrep{36}+\irrep[1]{84}+\irrep{160}+\irrep{420}$\\
$\irrep{35}\times\irrep[1]{20}$ & = & $\irrep{84}+\irrep{256}+\irrep[1]{360}$\\
$\irrepbar{35}\times\irrep{35}$ & = & $\irrep{1}+\irrep{15}+\irrep{84}+\irrep[1]{300}+\irrep{825}$\\
$\irrep{35}\times\irrep{35}$ & = & $\irrep{105}+\irrep{165}+\irrep{280}+\irrep{315}+\irrep[1]{360}$\\
$\irrepbar{36}\times\irrep{4}$ & = & $\irrep{15}+\irrepbar{45}+\irrep{84}$\\
$\irrep{36}\times\irrep{4}$ & = & $\irrep{10}+\irrep{64}+\irrep{70}$\\
$\irrep{36}\times\irrep{6}$ & = & $\irrepbar{20}+\irrepbar[2]{20}+\irrepbar{36}+\irrepbar{140}$\\
$\irrepbar{36}\times\irrep{10}$ & = & $\irrep{4}+\irrep{20}+\irrep{36}+\irrep{140}+\irrep{160}$\\
$\irrep{36}\times\irrep{10}$ & = & $\irrepbar{20}+\irrepbar[2]{20}+\irrepbar{60}+\irrepbar{120}+\irrepbar{140}$\\
$\irrep{36}\times\irrep{15}$ & = & $\irrep{4}+\irrep{20}+2(\irrep{36})+\irrep{60}+\irrep[1]{84}+\irrep{140}+\irrep{160}$\\
$\irrepbar{36}\times\irrep[2]{20}$ & = & $\irrep[1]{20}+\irrepbar{35}+\irrepbar{45}+\irrep{175}+\irrepbar{189}+\irrepbar{256}$\\
$\irrepbar{36}\times\irrep{20}$ & = & $\irrep{15}+\irrep[1]{20}+\irrepbar{35}+\irrep{45}+2(\irrepbar{45})+\irrep{84}+\irrep{175}+\irrepbar{256}$\\
$\irrep{36}\times\irrep[2]{20}$ & = & $\irrep{6}+\irrepbar{10}+\irrep{64}+\irrepbar{70}+\irrepbar{270}+\irrep{300}$\\
$\irrep{36}\times\irrep{20}$ & = & $\irrep{6}+\irrep{10}+\irrepbar{10}+2(\irrep{64})+\irrep{70}+\irrepbar{70}+\irrep{126}+\irrep{300}$\\
$\irrep{36}\times\irrep[1]{20}$ & = & $\irrep{20}+\irrep[2]{20}+\irrep{36}+\irrep{60}+\irrep[1]{84}+\irrep{140}+\irrep{360}$\\
$\irrepbar{36}\times\irrep{35}$ & = & $\irrepbar{20}+\irrepbar[2]{20}+\irrepbar{120}+\irrepbar{140}+\irrepbar[1]{420}+\irrepbar{540}$\\
$\irrep{36}\times\irrep{35}$ & = & $\irrep{56}+\irrep{60}+\irrep[1]{84}+\irrep[2]{280}+\irrep{360}+\irrep{420}$\\
$\irrepbar{36}\times\irrep{36}$ & = & $\irrep{1}+2(\irrep{15})+\irrep[1]{20}+\irrep{45}+\irrepbar{45}+2(\irrep{84})+\irrep{175}+\irrep{256}+\irrepbar{256}+\irrep[1]{300}$\\
$\irrep{36}\times\irrep{36}$ & = & $\irrep{6}+\irrep{10}+\irrep{50}+2(\irrep{64})+2(\irrep{70})+\irrep{126}+\irrepbar{126}+\irrep[2]{140}+\irrep{270}+\irrep{300}$\\
$\irrepbar{45}\times\irrep{4}$ & = & $\irrep{20}+\irrep[2]{20}+\irrep{140}$\\
$\irrep{45}\times\irrep{4}$ & = & $\irrep{36}+\irrep{60}+\irrep[1]{84}$\\
$\irrep{45}\times\irrep{6}$ & = & $\irrep{10}+\irrep{64}+\irrep{70}+\irrep{126}$\\
$\irrepbar{45}\times\irrep{10}$ & = & $\irrep{6}+\irrepbar{10}+\irrep{64}+\irrepbar{70}+\irrep{300}$\\
$\irrep{45}\times\irrep{10}$ & = & $\irrep{50}+\irrep{64}+\irrep{70}+\irrep{126}+\irrep[2]{140}$\\
$\irrep{45}\times\irrep{15}$ & = & $\irrep{15}+\irrep[1]{20}+\irrep{35}+2(\irrep{45})+\irrep{84}+\irrep{175}+\irrep{256}$\\
$\irrepbar{45}\times\irrep[2]{20}$ & = & $\irrep{60}+\irrep{120}+\irrep{140}+\irrep[1]{140}+\irrep{216}+\irrep{224}$\\
$\irrepbar{45}\times\irrep{20}$ & = & $\irrep{20}+\irrep[2]{20}+\irrep{36}+\irrep{60}+\irrep{120}+2(\irrep{140})+\irrep[1]{140}+\irrep{224}$\\
$\irrep{45}\times\irrep[2]{20}$ & = & $\irrep{4}+\irrep{20}+\irrep{36}+\irrep{140}+\irrep{160}+\irrep{540}$\\
$\irrep{45}\times\irrep{20}$ & = & $\irrep{4}+\irrep{20}+2(\irrep{36})+\irrep{60}+\irrep[1]{84}+\irrep{140}+\irrep{160}+\irrep{360}$\\
$\irrep{45}\times\irrep[1]{20}$ & = & $\irrep{15}+\irrep{45}+\irrepbar{45}+\irrep{84}+\irrep{175}+\irrep{256}+\irrep{280}$\\
$\irrepbar{45}\times\irrep{35}$ & = & $\irrep{15}+\irrep{45}+\irrep{84}+\irrep{256}+\irrep[1]{300}+\irrep{875}$\\
$\irrep{45}\times\irrep{35}$ & = & $\irrep{175}+\irrep{189}+\irrep{256}+\irrep{280}+\irrep{315}+\irrep[1]{360}$\\
$\irrepbar{45}\times\irrep{36}$ & = & $\irrep{4}+2(\irrep{20})+\irrep[2]{20}+\irrep{36}+\irrep{60}+\irrep{120}+2(\irrep{140})+\irrep{160}+\irrep{360}+\irrep{540}$\\
$\irrep{45}\times\irrep{36}$ & = & $\irrep{20}+\irrep{36}+\irrep{56}+2(\irrep{60})+2(\irrep[1]{84})+\irrep{140}+\irrep[1]{140}+\irrep{160}+\irrep{360}+\irrep{420}$\\
$\irrepbar{45}\times\irrep{45}$ & = & $\irrep{1}+2(\irrep{15})+\irrep[1]{20}+\irrep{45}+\irrepbar{45}+2(\irrep{84})+\irrep{175}+\irrep{256}+\irrepbar{256}+\irrep[1]{300}+\irrep{729}$\\
$\irrep{45}\times\irrep{45}$ & = & $\irrep[1]{20}+\irrep{35}+\irrep{45}+\irrepbar{45}+\irrep{84}+\irrep{105}+2(\irrep{175})+\irrep{189}+2(\irrep{256})+\irrep{280}+\irrep[1]{360}$\\
$\irrep{50}\times\irrep{4}$ & = & $\irrepbar{60}+\irrepbar[1]{140}$\\
$\irrep{50}\times\irrep{6}$ & = & $\irrep[1]{20}+\irrep{105}+\irrep{175}$\\
$\irrep{50}\times\irrep{10}$ & = & $\irrepbar{45}+\irrep{175}+\irrep{280}$\\
$\irrep{50}\times\irrep{15}$ & = & $\irrep{50}+\irrep{64}+\irrep{126}+\irrepbar{126}+\irrep{384}$\\
$\irrep{50}\times\irrep[2]{20}$ & = & $\irrepbar[2]{20}+\irrepbar{140}+\irrepbar{360}+\irrepbar{480}$\\
$\irrep{50}\times\irrep{20}$ & = & $\irrepbar{20}+\irrepbar{60}+\irrepbar{140}+\irrepbar[1]{140}+\irrepbar[1]{280}+\irrepbar{360}$\\
$\irrep{50}\times\irrep[1]{20}$ & = & $\irrep{6}+\irrep{50}+\irrep{64}+\irrep{196}+\irrep{300}+\irrep{384}$\\
$\irrep{50}\times\irrep{35}$ & = & $\irrepbar{70}+\irrep{300}+\irrep{630}+\irrep{750}$\\
$\irrep{50}\times\irrep{36}$ & = & $\irrepbar{36}+\irrepbar{60}+\irrepbar[1]{84}+\irrepbar{140}+\irrepbar[1]{140}+\irrepbar{224}+\irrepbar{360}+\irrepbar{756}$\\
$\irrep{50}\times\irrep{45}$ & = & $\irrepbar{10}+\irrep{64}+\irrepbar{70}+\irrep{126}+\irrepbar{126}+\irrep{300}+\irrep{384}+\irrep[1]{540}+\irrep{630}$\\
$\irrep{50}\times\irrep{50}$ & = & $\irrep{1}+\irrep{15}+\irrep[1]{20}+\irrep{84}+\irrep{105}+\irrep{175}+\irrep[1]{300}+\irrep{336}+\irrep{729}+\irrep{735}$\\
\end{longtable}
\newpage
\begin{longtable}{rcl}
\caption{\label{tab:SU5TensorProducts}SU(5) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SU(5) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrepbar{5}\times\irrep{5}$ & = & $\irrep{1}+\irrep{24}$\\
$\irrep{5}\times\irrep{5}$ & = & $\irrep{10}+\irrep{15}$\\
$\irrepbar{10}\times\irrep{5}$ & = & $\irrepbar{5}+\irrepbar{45}$\\
$\irrep{10}\times\irrep{5}$ & = & $\irrepbar{10}+\irrepbar{40}$\\
$\irrepbar{10}\times\irrep{10}$ & = & $\irrep{1}+\irrep{24}+\irrep{75}$\\
$\irrep{10}\times\irrep{10}$ & = & $\irrepbar{5}+\irrepbar{45}+\irrepbar{50}$\\
$\irrepbar{15}\times\irrep{5}$ & = & $\irrepbar{5}+\irrepbar{70}$\\
$\irrep{15}\times\irrep{5}$ & = & $\irrepbar{35}+\irrepbar{40}$\\
$\irrepbar{15}\times\irrep{10}$ & = & $\irrep{24}+\irrepbar{126}$\\
$\irrep{15}\times\irrep{10}$ & = & $\irrepbar{45}+\irrepbar{105}$\\
$\irrepbar{15}\times\irrep{15}$ & = & $\irrep{1}+\irrep{24}+\irrep{200}$\\
$\irrep{15}\times\irrep{15}$ & = & $\irrepbar{50}+\irrepbar[1]{70}+\irrepbar{105}$\\
$\irrep{24}\times\irrep{5}$ & = & $\irrep{5}+\irrep{45}+\irrep{70}$\\
$\irrep{24}\times\irrep{10}$ & = & $\irrep{10}+\irrep{15}+\irrep{40}+\irrep{175}$\\
$\irrep{24}\times\irrep{15}$ & = & $\irrep{10}+\irrep{15}+\irrep{160}+\irrep{175}$\\
$\irrep{24}\times\irrep{24}$ & = & $\irrep{1}+2(\irrep{24})+\irrep{75}+\irrep{126}+\irrepbar{126}+\irrep{200}$\\
$\irrep{35}\times\irrep{5}$ & = & $\irrepbar{15}+\irrepbar{160}$\\
$\irrepbar{35}\times\irrep{5}$ & = & $\irrepbar[1]{70}+\irrepbar{105}$\\
$\irrep{35}\times\irrep{10}$ & = & $\irrepbar{70}+\irrepbar[1]{280}$\\
$\irrepbar{35}\times\irrep{10}$ & = & $\irrep{126}+\irrep{224}$\\
$\irrep{35}\times\irrep{15}$ & = & $\irrepbar{5}+\irrepbar{70}+\irrepbar[1]{450}$\\
$\irrepbar{35}\times\irrep{15}$ & = & $\irrep[1]{126}+\irrep[1]{175}+\irrep{224}$\\
$\irrep{35}\times\irrep{24}$ & = & $\irrep{35}+\irrep{40}+\irrep[1]{315}+\irrep{450}$\\
$\irrep{35}\times\irrep{35}$ & = & $\irrepbar[2]{175}+\irrepbar[1]{210}+\irrepbar{420}+\irrepbar[1]{420}$\\
$\irrepbar{35}\times\irrep{35}$ & = & $\irrep{1}+\irrep{24}+\irrep{200}+\irrep{1000}$\\
$\irrep{40}\times\irrep{5}$ & = & $\irrepbar{10}+\irrepbar{15}+\irrepbar{175}$\\
$\irrepbar{40}\times\irrep{5}$ & = & $\irrepbar{45}+\irrepbar{50}+\irrepbar{105}$\\
$\irrep{40}\times\irrep{10}$ & = & $\irrepbar{5}+\irrepbar{45}+\irrepbar{70}+\irrepbar{280}$\\
$\irrepbar{40}\times\irrep{10}$ & = & $\irrep{24}+\irrep{75}+\irrep{126}+\irrep[1]{175}$\\
$\irrep{40}\times\irrep{15}$ & = & $\irrepbar{5}+\irrepbar{45}+\irrepbar{70}+\irrepbar{480}$\\
$\irrepbar{40}\times\irrep{15}$ & = & $\irrep{75}+\irrep{126}+\irrep[1]{175}+\irrep{224}$\\
$\irrep{40}\times\irrep{24}$ & = & $\irrep{10}+\irrep{35}+2(\irrep{40})+\irrep{175}+\irrep{210}+\irrep{450}$\\
$\irrep{40}\times\irrep{35}$ & = & $\irrepbar{280}+\irrepbar[1]{280}+\irrepbar{420}+\irrepbar[1]{420}$\\
$\irrepbar{40}\times\irrep{35}$ & = & $\irrep{24}+\irrepbar{126}+\irrep{200}+\irrepbar[1]{1050}$\\
$\irrep{40}\times\irrep{40}$ & = & $\irrepbar{45}+\irrepbar{50}+\irrepbar{70}+\irrepbar[2]{175}+2(\irrepbar{280})+\irrepbar[1]{280}+\irrepbar{420}$\\
$\irrepbar{40}\times\irrep{40}$ & = & $\irrep{1}+2(\irrep{24})+\irrep{75}+\irrep{126}+\irrepbar{126}+\irrep{200}+\irrep{1024}$\\
$\irrep{45}\times\irrep{5}$ & = & $\irrep{10}+\irrep{40}+\irrep{175}$\\
$\irrepbar{45}\times\irrep{5}$ & = & $\irrep{24}+\irrep{75}+\irrep{126}$\\
$\irrep{45}\times\irrep{10}$ & = & $\irrepbar{10}+\irrepbar{15}+\irrepbar{40}+\irrepbar{175}+\irrepbar{210}$\\
$\irrepbar{45}\times\irrep{10}$ & = & $\irrep{5}+\irrep{45}+\irrep{50}+\irrep{70}+\irrep{280}$\\
$\irrep{45}\times\irrep{15}$ & = & $\irrepbar{10}+\irrepbar{40}+\irrepbar{175}+\irrepbar{450}$\\
$\irrepbar{45}\times\irrep{15}$ & = & $\irrep{45}+\irrep{70}+\irrep{280}+\irrep[1]{280}$\\
$\irrep{45}\times\irrep{24}$ & = & $\irrep{5}+2(\irrep{45})+\irrep{50}+\irrep{70}+\irrep{105}+\irrep{280}+\irrep{480}$\\
$\irrep{45}\times\irrep{35}$ & = & $\irrepbar{160}+\irrepbar{175}+\irrepbar{540}+\irrepbar{700}$\\
$\irrepbar{45}\times\irrep{35}$ & = & $\irrep{45}+\irrep{105}+\irrep{480}+\irrep{945}$\\
$\irrep{45}\times\irrep{40}$ & = & $\irrepbar{10}+\irrepbar{15}+\irrepbar{40}+\irrepbar{160}+2(\irrepbar{175})+\irrepbar{210}+\irrepbar{315}+\irrepbar{700}$\\
$\irrepbar{45}\times\irrep{40}$ & = & $\irrep{5}+2(\irrep{45})+\irrep{50}+\irrep{70}+\irrep{105}+\irrep{280}+\irrep{480}+\irrep{720}$\\
$\irrep{45}\times\irrep{45}$ & = & $\irrep{10}+\irrep{15}+\irrep{35}+2(\irrep{40})+2(\irrep{175})+\irrep{210}+\irrep{315}+\irrep{450}+\irrep{560}$\\
$\irrepbar{45}\times\irrep{45}$ & = & $\irrep{1}+2(\irrep{24})+2(\irrep{75})+\irrep{126}+\irrepbar{126}+\irrep[1]{175}+\irrepbar[1]{175}+\irrep{200}+\irrep{1024}$\\
$\irrep{50}\times\irrep{5}$ & = & $\irrep{40}+\irrep{210}$\\
$\irrepbar{50}\times\irrep{5}$ & = & $\irrep{75}+\irrep[1]{175}$\\
$\irrep{50}\times\irrep{10}$ & = & $\irrepbar{10}+\irrepbar{175}+\irrepbar{315}$\\
$\irrepbar{50}\times\irrep{10}$ & = & $\irrep{45}+\irrep[2]{175}+\irrep{280}$\\
$\irrep{50}\times\irrep{15}$ & = & $\irrepbar{15}+\irrepbar{175}+\irrepbar{560}$\\
$\irrepbar{50}\times\irrep{15}$ & = & $\irrep{50}+\irrep{280}+\irrep{420}$\\
$\irrep{50}\times\irrep{24}$ & = & $\irrep{45}+\irrep{50}+\irrep{105}+\irrep{280}+\irrep{720}$\\
$\irrep{50}\times\irrep{35}$ & = & $\irrepbar{210}+\irrepbar{700}+\irrepbar{840}$\\
$\irrepbar{50}\times\irrep{35}$ & = & $\irrep{70}+\irrep{480}+\irrep{1200}$\\
$\irrep{50}\times\irrep{40}$ & = & $\irrepbar{40}+\irrepbar{175}+\irrepbar{210}+\irrepbar{315}+\irrepbar[1]{560}+\irrepbar{700}$\\
$\irrepbar{50}\times\irrep{40}$ & = & $\irrep{5}+\irrep{45}+\irrep{70}+\irrep{280}+\irrep{480}+\irrep{1120}$\\
$\irrep{50}\times\irrep{45}$ & = & $\irrep{10}+\irrep{40}+\irrep{175}+\irrep{210}+\irrep{315}+\irrep{450}+\irrep{1050}$\\
$\irrepbar{50}\times\irrep{45}$ & = & $\irrep{24}+\irrep{75}+\irrep{126}+\irrepbar{126}+\irrep[1]{175}+\irrepbar[1]{700}+\irrep{1024}$\\
$\irrep{50}\times\irrep{50}$ & = & $\irrep{15}+\irrep{175}+\irrep{210}+\irrep{490}+\irrep{560}+\irrep{1050}$\\
$\irrepbar{50}\times\irrep{50}$ & = & $\irrep{1}+\irrep{24}+\irrep{75}+\irrep{200}+\irrep{1024}+\irrep{1176}$\\
$\irrepbar{70}\times\irrep{5}$ & = & $\irrep{24}+\irrepbar{126}+\irrep{200}$\\
$\irrep{70}\times\irrep{5}$ & = & $\irrep{15}+\irrep{160}+\irrep{175}$\\
$\irrepbar{70}\times\irrep{10}$ & = & $\irrep{45}+\irrep{70}+\irrep{105}+\irrep{480}$\\
$\irrep{70}\times\irrep{10}$ & = & $\irrepbar{35}+\irrepbar{40}+\irrepbar{175}+\irrepbar{450}$\\
$\irrepbar{70}\times\irrep{15}$ & = & $\irrep{5}+\irrep{45}+\irrep{70}+\irrep[1]{450}+\irrep{480}$\\
$\irrep{70}\times\irrep{15}$ & = & $\irrepbar{35}+\irrepbar{40}+\irrepbar{210}+\irrepbar[1]{315}+\irrepbar{450}$\\
$\irrep{70}\times\irrep{24}$ & = & $\irrep{5}+\irrep{45}+2(\irrep{70})+\irrep{280}+\irrep[1]{280}+\irrep[1]{450}+\irrep{480}$\\
$\irrepbar{70}\times\irrep{35}$ & = & $\irrep{50}+\irrep[1]{70}+\irrep{105}+\irrep[2]{560}+\irrep{720}+\irrep{945}$\\
$\irrep{70}\times\irrep{35}$ & = & $\irrepbar{10}+\irrepbar{15}+\irrepbar{160}+\irrepbar{175}+\irrepbar{875}+\irrepbar{1215}$\\
$\irrepbar{70}\times\irrep{40}$ & = & $\irrep{45}+\irrep{50}+\irrep[1]{70}+2(\irrep{105})+\irrep{280}+\irrep{480}+\irrep{720}+\irrep{945}$\\
$\irrep{70}\times\irrep{40}$ & = & $\irrepbar{10}+\irrepbar{15}+\irrepbar{40}+\irrepbar{160}+2(\irrepbar{175})+\irrepbar{450}+\irrepbar{560}+\irrepbar{1215}$\\
$\irrepbar{70}\times\irrep{45}$ & = & $\irrep{24}+\irrep{75}+\irrep{126}+2(\irrepbar{126})+\irrepbar[1]{175}+\irrep{200}+\irrepbar{224}+\irrep{1024}+\irrepbar[1]{1050}$\\
$\irrep{70}\times\irrep{45}$ & = & $\irrep{10}+\irrep{15}+\irrep{40}+\irrep{160}+2(\irrep{175})+\irrep{210}+\irrep{450}+\irrep{700}+\irrep{1215}$\\
$\irrepbar{70}\times\irrep{50}$ & = & $\irrep{75}+\irrepbar{126}+\irrep[1]{175}+\irrepbar[1]{175}+\irrepbar{224}+\irrep{1024}+\irrepbar{1701}$\\
$\irrep{70}\times\irrep{50}$ & = & $\irrep{35}+\irrep{40}+\irrep{175}+\irrep{210}+\irrep{450}+\irrep{700}+\irrep{1890}$\\
$\irrepbar{70}\times\irrep{70}$ & = & $\irrep{1}+2(\irrep{24})+\irrep{75}+\irrep{126}+\irrepbar{126}+2(\irrep{200})+\irrep{1000}+\irrep{1024}+\irrep[1]{1050}+\irrepbar[1]{1050}$\\
$\irrep{70}\times\irrep{70}$ & = & $\irrep{10}+\irrep{15}+2(\irrep{160})+2(\irrep{175})+\irrep{315}+\irrep{540}+\irrep{560}+\irrep{700}+\irrep{875}+\irrep{1215}$\\
$\irrep{75}\times\irrep{5}$ & = & $\irrep{45}+\irrep{50}+\irrep{280}$\\
$\irrep{75}\times\irrep{10}$ & = & $\irrep{10}+\irrep{40}+\irrep{175}+\irrep{210}+\irrep{315}$\\
$\irrep{75}\times\irrep{15}$ & = & $\irrep{40}+\irrep{175}+\irrep{210}+\irrep{700}$\\
$\irrep{75}\times\irrep{24}$ & = & $\irrep{24}+2(\irrep{75})+\irrep{126}+\irrepbar{126}+\irrep[1]{175}+\irrepbar[1]{175}+\irrep{1024}$\\
$\irrep{75}\times\irrep{35}$ & = & $\irrep{175}+\irrep{450}+\irrep{560}+\irrep{1440}$\\
$\irrep{75}\times\irrep{40}$ & = & $\irrep{10}+\irrep{15}+\irrep{40}+2(\irrep{175})+\irrep{210}+\irrep{315}+\irrep{450}+\irrep{560}+\irrep{1050}$\\
$\irrep{75}\times\irrep{45}$ & = & $\irrep{5}+2(\irrep{45})+\irrep{50}+\irrep{70}+\irrep{105}+\irrep[2]{175}+2(\irrep{280})+\irrep{480}+\irrep{720}+\irrep{1120}$\\
$\irrep{75}\times\irrep{50}$ & = & $\irrep{5}+\irrep{45}+\irrep{50}+\irrep{70}+\irrep{280}+\irrep{480}+\irrep{720}+\irrep{980}+\irrep{1120}$\\
$\irrep{75}\times\irrep{70}$ & = & $\irrep{45}+\irrep{50}+\irrep{70}+\irrep{105}+2(\irrep{280})+\irrep[1]{280}+\irrep{420}+\irrep{480}+\irrep{720}+\irrep{2520}$\\
$\irrep{75}\times\irrep{75}$ & = & $\irrep{1}+2(\irrep{24})+2(\irrep{75})+\irrep{126}+\irrepbar{126}+\irrep[1]{175}+\irrepbar[1]{175}+\irrep{200}+\irrep[1]{700}+\irrepbar[1]{700}+2(\irrep{1024})+\irrep{1176}$\\
\end{longtable}
\newpage
\begin{longtable}{rcl}
\caption{\label{tab:SU6TensorProducts}SU(6) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SU(6) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrepbar{6}\times\irrep{6}$ & = & $\irrep{1}+\irrep{35}$\\
$\irrep{6}\times\irrep{6}$ & = & $\irrep{15}+\irrep{21}$\\
$\irrepbar{15}\times\irrep{6}$ & = & $\irrepbar{6}+\irrepbar{84}$\\
$\irrep{15}\times\irrep{6}$ & = & $\irrep{20}+\irrep{70}$\\
$\irrepbar{15}\times\irrep{15}$ & = & $\irrep{1}+\irrep{35}+\irrep{189}$\\
$\irrep{15}\times\irrep{15}$ & = & $\irrepbar{15}+\irrepbar{105}+\irrepbar[1]{105}$\\
$\irrep{20}\times\irrep{6}$ & = & $\irrepbar{15}+\irrepbar{105}$\\
$\irrep{20}\times\irrep{15}$ & = & $\irrepbar{6}+\irrepbar{84}+\irrepbar{210}$\\
$\irrep{20}\times\irrep{20}$ & = & $\irrep{1}+\irrep{35}+\irrep{175}+\irrep{189}$\\
$\irrepbar{21}\times\irrep{6}$ & = & $\irrepbar{6}+\irrepbar{120}$\\
$\irrep{21}\times\irrep{6}$ & = & $\irrep{56}+\irrep{70}$\\
$\irrepbar{21}\times\irrep{15}$ & = & $\irrep{35}+\irrepbar{280}$\\
$\irrep{21}\times\irrep{15}$ & = & $\irrepbar{105}+\irrepbar[1]{210}$\\
$\irrep{21}\times\irrep{20}$ & = & $\irrepbar{84}+\irrepbar{336}$\\
$\irrepbar{21}\times\irrep{21}$ & = & $\irrep{1}+\irrep{35}+\irrep{405}$\\
$\irrep{21}\times\irrep{21}$ & = & $\irrepbar[1]{105}+\irrepbar{126}+\irrepbar[1]{210}$\\
$\irrep{35}\times\irrep{6}$ & = & $\irrep{6}+\irrep{84}+\irrep{120}$\\
$\irrep{35}\times\irrep{15}$ & = & $\irrep{15}+\irrep{21}+\irrep{105}+\irrep{384}$\\
$\irrep{35}\times\irrep{20}$ & = & $\irrep{20}+\irrep{70}+\irrepbar{70}+\irrep{540}$\\
$\irrep{35}\times\irrep{21}$ & = & $\irrep{15}+\irrep{21}+\irrep{315}+\irrep{384}$\\
$\irrep{35}\times\irrep{35}$ & = & $\irrep{1}+2(\irrep{35})+\irrep{189}+\irrep{280}+\irrepbar{280}+\irrep{405}$\\
$\irrepbar{56}\times\irrep{6}$ & = & $\irrepbar{21}+\irrepbar{315}$\\
$\irrep{56}\times\irrep{6}$ & = & $\irrepbar{126}+\irrepbar[1]{210}$\\
$\irrepbar{56}\times\irrep{15}$ & = & $\irrepbar{120}+\irrepbar{720}$\\
$\irrep{56}\times\irrep{15}$ & = & $\irrepbar{336}+\irrepbar{504}$\\
$\irrep{56}\times\irrep{20}$ & = & $\irrep{280}+\irrep[2]{840}$\\
$\irrepbar{56}\times\irrep{21}$ & = & $\irrepbar{6}+\irrepbar{120}+\irrepbar[1]{1050}$\\
$\irrep{56}\times\irrep{21}$ & = & $\irrepbar{252}+\irrepbar{420}+\irrepbar{504}$\\
$\irrep{56}\times\irrep{35}$ & = & $\irrep{56}+\irrep{70}+\irrep{700}+\irrep{1134}$\\
$\irrepbar{56}\times\irrep{56}$ & = & $\irrep{1}+\irrep{35}+\irrep{405}+\irrep{2695}$\\
$\irrep{56}\times\irrep{56}$ & = & $\irrep{462}+\irrep{490}+\irrep[2]{1050}+\irrep[1]{1134}$\\
$\irrepbar{70}\times\irrep{6}$ & = & $\irrepbar{15}+\irrepbar{21}+\irrepbar{384}$\\
$\irrep{70}\times\irrep{6}$ & = & $\irrepbar{105}+\irrepbar[1]{105}+\irrepbar[1]{210}$\\
$\irrepbar{70}\times\irrep{15}$ & = & $\irrepbar{6}+\irrepbar{84}+\irrepbar{120}+\irrepbar{840}$\\
$\irrep{70}\times\irrep{15}$ & = & $\irrepbar{84}+\irrepbar{210}+\irrepbar{336}+\irrepbar{420}$\\
$\irrep{70}\times\irrep{20}$ & = & $\irrep{35}+\irrep{189}+\irrep{280}+\irrep{896}$\\
$\irrepbar{70}\times\irrep{21}$ & = & $\irrepbar{6}+\irrepbar{84}+\irrepbar{120}+\irrepbar{1260}$\\
$\irrep{70}\times\irrep{21}$ & = & $\irrepbar{210}+\irrepbar{336}+\irrepbar{420}+\irrepbar{504}$\\
$\irrep{70}\times\irrep{35}$ & = & $\irrep{20}+\irrep{56}+2(\irrep{70})+\irrep{540}+\irrepbar{560}+\irrep{1134}$\\
$\irrepbar{70}\times\irrep{56}$ & = & $\irrep{35}+\irrep{280}+\irrep{405}+\irrep{3200}$\\
$\irrep{70}\times\irrep{56}$ & = & $\irrep[2]{840}+\irrep{896}+\irrep[2]{1050}+\irrep[1]{1134}$\\
$\irrepbar{70}\times\irrep{70}$ & = & $\irrep{1}+2(\irrep{35})+\irrep{189}+\irrep{280}+\irrepbar{280}+\irrep{405}+\irrep{3675}$\\
$\irrep{70}\times\irrep{70}$ & = & $\irrep{175}+\irrep{189}+\irrep{280}+\irrep{490}+\irrep[2]{840}+2(\irrep{896})+\irrep[1]{1134}$\\
$\irrep{84}\times\irrep{6}$ & = & $\irrep{15}+\irrep{105}+\irrep{384}$\\
$\irrepbar{84}\times\irrep{6}$ & = & $\irrep{35}+\irrep{189}+\irrep{280}$\\
$\irrep{84}\times\irrep{15}$ & = & $\irrep{20}+\irrep{70}+\irrepbar{70}+\irrep{540}+\irrepbar{560}$\\
$\irrepbar{84}\times\irrep{15}$ & = & $\irrep{6}+\irrep{84}+\irrep{120}+\irrep{210}+\irrep{840}$\\
$\irrep{84}\times\irrep{20}$ & = & $\irrepbar{15}+\irrepbar{21}+\irrepbar{105}+\irrepbar[1]{105}+\irrepbar{384}+\irrepbar{1050}$\\
$\irrep{84}\times\irrep{21}$ & = & $\irrep{20}+\irrep{70}+\irrep{540}+\irrep{1134}$\\
$\irrepbar{84}\times\irrep{21}$ & = & $\irrep{84}+\irrep{120}+\irrep{720}+\irrep{840}$\\
$\irrep{84}\times\irrep{35}$ & = & $\irrep{6}+2(\irrep{84})+\irrep{120}+\irrep{210}+\irrep{336}+\irrep{840}+\irrep{1260}$\\
$\irrep{84}\times\irrep{56}$ & = & $\irrepbar{105}+\irrepbar[1]{210}+\irrepbar{1701}+\irrepbar{2688}$\\
$\irrepbar{84}\times\irrep{56}$ & = & $\irrep{315}+\irrep{384}+\irrep{1575}+\irrep{2430}$\\
$\irrep{84}\times\irrep{70}$ & = & $\irrepbar{15}+2(\irrepbar{105})+\irrepbar[1]{105}+\irrepbar[1]{210}+\irrepbar{384}+\irrepbar{1050}+\irrepbar{1701}+\irrepbar{2205}$\\
$\irrepbar{84}\times\irrep{70}$ & = & $\irrep{15}+\irrep{21}+\irrep{105}+\irrep{315}+2(\irrep{384})+\irrep{1050}+\irrep{1176}+\irrep{2430}$\\
$\irrep{84}\times\irrep{84}$ & = & $\irrep{15}+\irrep{21}+2(\irrep{105})+\irrep[1]{105}+\irrep[1]{210}+2(\irrep{384})+\irrep{1050}+\irrep{1176}+\irrep{1701}+\irrep{1800}$\\
$\irrepbar{84}\times\irrep{84}$ & = & $\irrep{1}+2(\irrep{35})+\irrep{175}+2(\irrep{189})+\irrep{280}+\irrepbar{280}+\irrep{405}+\irrep{896}+\irrepbar{896}+\irrep{3675}$\\
$\irrep[1]{105}\times\irrep{6}$ & = & $\irrepbar{70}+\irrep{560}$\\
$\irrep{105}\times\irrep{6}$ & = & $\irrep{20}+\irrepbar{70}+\irrep{540}$\\
$\irrepbar[1]{105}\times\irrep{6}$ & = & $\irrepbar{210}+\irrepbar{420}$\\
$\irrepbar{105}\times\irrep{6}$ & = & $\irrepbar{84}+\irrepbar{210}+\irrepbar{336}$\\
$\irrep[1]{105}\times\irrep{15}$ & = & $\irrepbar{15}+\irrepbar{384}+\irrepbar{1176}$\\
$\irrep{105}\times\irrep{15}$ & = & $\irrepbar{15}+\irrepbar{21}+\irrepbar{105}+\irrepbar{384}+\irrepbar{1050}$\\
$\irrepbar[1]{105}\times\irrep{15}$ & = & $\irrep{189}+\irrep{490}+\irrep{896}$\\
$\irrepbar{105}\times\irrep{15}$ & = & $\irrep{35}+\irrep{175}+\irrep{189}+\irrep{280}+\irrep{896}$\\
$\irrep[1]{105}\times\irrep{20}$ & = & $\irrepbar{84}+\irrepbar{840}+\irrepbar[1]{1176}$\\
$\irrep{105}\times\irrep{20}$ & = & $\irrepbar{6}+\irrepbar{84}+\irrepbar{120}+\irrepbar{210}+\irrepbar{840}+\irrepbar[1]{840}$\\
$\irrep[1]{105}\times\irrep{21}$ & = & $\irrepbar{21}+\irrepbar{384}+\irrepbar{1800}$\\
$\irrep{105}\times\irrep{21}$ & = & $\irrepbar{15}+\irrepbar{105}+\irrepbar{384}+\irrepbar{1701}$\\
$\irrepbar[1]{105}\times\irrep{21}$ & = & $\irrep{175}+\irrep{896}+\irrep[1]{1134}$\\
$\irrepbar{105}\times\irrep{21}$ & = & $\irrep{189}+\irrep{280}+\irrep[2]{840}+\irrep{896}$\\
$\irrep[1]{105}\times\irrep{35}$ & = & $\irrep{105}+\irrep[1]{105}+\irrep[1]{210}+\irrep{1050}+\irrep{2205}$\\
$\irrep{105}\times\irrep{35}$ & = & $\irrep{15}+2(\irrep{105})+\irrep[1]{105}+\irrep[1]{210}+\irrep{384}+\irrep{1050}+\irrep{1701}$\\
$\irrep[1]{105}\times\irrep{56}$ & = & $\irrepbar{120}+\irrepbar{1260}+\irrepbar{4500}$\\
$\irrep{105}\times\irrep{56}$ & = & $\irrepbar{84}+\irrepbar{336}+\irrepbar{1260}+\irrepbar{4200}$\\
$\irrepbar[1]{105}\times\irrep{56}$ & = & $\irrep[1]{840}+\irrep{2520}+\irrep[3]{2520}$\\
$\irrepbar{105}\times\irrep{56}$ & = & $\irrep{720}+\irrep{840}+\irrep[1]{1800}+\irrep{2520}$\\
$\irrep[1]{105}\times\irrep{70}$ & = & $\irrepbar{6}+\irrepbar{84}+\irrepbar{120}+\irrepbar{840}+\irrepbar{1260}+\irrepbar{5040}$\\
$\irrep{105}\times\irrep{70}$ & = & $\irrepbar{6}+2(\irrepbar{84})+\irrepbar{120}+\irrepbar{210}+\irrepbar{336}+\irrepbar{840}+\irrepbar{1260}+\irrepbar{4410}$\\
$\irrepbar[1]{105}\times\irrep{70}$ & = & $\irrep{210}+\irrep{840}+\irrep[1]{840}+\irrep[1]{1176}+\irrep{1764}+\irrep{2520}$\\
$\irrepbar{105}\times\irrep{70}$ & = & $\irrep{84}+\irrep{120}+\irrep{210}+\irrep{720}+2(\irrep{840})+\irrep[1]{840}+\irrep[1]{1176}+\irrep{2520}$\\
$\irrep[1]{105}\times\irrep{84}$ & = & $\irrep{20}+\irrepbar{70}+\irrep{540}+\irrep{560}+\irrepbar{1134}+\irrep{1960}+\irrepbar{4536}$\\
$\irrep{105}\times\irrep{84}$ & = & $\irrep{20}+\irrepbar{56}+\irrep{70}+2(\irrepbar{70})+2(\irrep{540})+\irrep{560}+\irrepbar{560}+\irrepbar{1134}+\irrep{1960}+\irrepbar{3240}$\\
$\irrepbar[1]{105}\times\irrep{84}$ & = & $\irrepbar{84}+\irrepbar{210}+\irrepbar{336}+\irrepbar{420}+\irrepbar{840}+\irrepbar[2]{2520}+\irrepbar{4410}$\\
$\irrepbar{105}\times\irrep{84}$ & = & $\irrepbar{6}+2(\irrepbar{84})+\irrepbar{120}+2(\irrepbar{210})+\irrepbar{336}+\irrepbar{420}+\irrepbar{840}+\irrepbar[1]{840}+\irrepbar{1260}+\irrepbar{4410}$\\
$\irrep[1]{105}\times\irrep[1]{105}$ & = & $\irrepbar[1]{105}+\irrepbar{1050}+\irrepbar{1176}+\irrepbar[1]{1764}+\irrepbar[1]{2520}+\irrepbar[1]{4410}$\\
$\irrep[1]{105}\times\irrep{105}$ & = & $\irrepbar{105}+\irrepbar{384}+\irrepbar{1050}+\irrepbar{1176}+\irrepbar{1470}+\irrepbar{2430}+\irrepbar[1]{4410}$\\
$\irrep{105}\times\irrep{105}$ & = & $\irrepbar{15}+\irrepbar{21}+\irrepbar{105}+\irrepbar[1]{105}+\irrepbar{315}+2(\irrepbar{384})+2(\irrepbar{1050})+\irrepbar{1176}+\irrepbar{1470}+\irrepbar{2430}+\irrepbar[1]{2520}$\\
$\irrepbar[1]{105}\times\irrep{105}$ & = & $\irrep{35}+\irrep{189}+\irrep{280}+\irrepbar{280}+\irrep{896}+\irrep{3675}+\irrepbar{5670}$\\
$\irrepbar{105}\times\irrep{105}$ & = & $\irrep{1}+2(\irrep{35})+\irrep{175}+2(\irrep{189})+\irrep{280}+\irrepbar{280}+\irrep{405}+\irrep{896}+\irrepbar{896}+\irrep{3675}+\irrep{3969}$\\
\end{longtable}
\newpage
\begin{longtable}{rcl}
\caption{\label{tab:SU7TensorProducts}SU(7) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SU(7) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrepbar{7}\times\irrep{7}$ & = & $\irrep{1}+\irrep{48}$\\
$\irrep{7}\times\irrep{7}$ & = & $\irrep{21}+\irrep{28}$\\
$\irrepbar{21}\times\irrep{7}$ & = & $\irrepbar{7}+\irrepbar{140}$\\
$\irrep{21}\times\irrep{7}$ & = & $\irrep{35}+\irrep{112}$\\
$\irrepbar{21}\times\irrep{21}$ & = & $\irrep{1}+\irrep{48}+\irrep{392}$\\
$\irrep{21}\times\irrep{21}$ & = & $\irrepbar{35}+\irrepbar{196}+\irrepbar{210}$\\
$\irrepbar{28}\times\irrep{7}$ & = & $\irrepbar{7}+\irrepbar{189}$\\
$\irrep{28}\times\irrep{7}$ & = & $\irrep{84}+\irrep{112}$\\
$\irrepbar{28}\times\irrep{21}$ & = & $\irrep{48}+\irrepbar{540}$\\
$\irrep{28}\times\irrep{21}$ & = & $\irrepbar{210}+\irrepbar{378}$\\
$\irrepbar{28}\times\irrep{28}$ & = & $\irrep{1}+\irrep{48}+\irrep[1]{735}$\\
$\irrep{28}\times\irrep{28}$ & = & $\irrepbar{196}+\irrepbar[1]{210}+\irrepbar{378}$\\
$\irrepbar{35}\times\irrep{7}$ & = & $\irrepbar{21}+\irrepbar{224}$\\
$\irrep{35}\times\irrep{7}$ & = & $\irrepbar{35}+\irrepbar{210}$\\
$\irrepbar{35}\times\irrep{21}$ & = & $\irrepbar{7}+\irrepbar{140}+\irrepbar{588}$\\
$\irrep{35}\times\irrep{21}$ & = & $\irrepbar{21}+\irrepbar{224}+\irrepbar{490}$\\
$\irrepbar{35}\times\irrep{28}$ & = & $\irrepbar{140}+\irrepbar{840}$\\
$\irrep{35}\times\irrep{28}$ & = & $\irrepbar{224}+\irrepbar{756}$\\
$\irrepbar{35}\times\irrep{35}$ & = & $\irrep{1}+\irrep{48}+\irrep{392}+\irrep{784}$\\
$\irrep{35}\times\irrep{35}$ & = & $\irrepbar{7}+\irrepbar{140}+\irrepbar[1]{490}+\irrepbar{588}$\\
$\irrep{48}\times\irrep{7}$ & = & $\irrep{7}+\irrep{140}+\irrep{189}$\\
$\irrep{48}\times\irrep{21}$ & = & $\irrep{21}+\irrep{28}+\irrep{224}+\irrep{735}$\\
$\irrep{48}\times\irrep{28}$ & = & $\irrep{21}+\irrep{28}+\irrep{560}+\irrep{735}$\\
$\irrep{48}\times\irrep{35}$ & = & $\irrep{35}+\irrep{112}+\irrep{210}+\irrep{1323}$\\
$\irrep{48}\times\irrep{48}$ & = & $\irrep{1}+2(\irrep{48})+\irrep{392}+\irrep{540}+\irrepbar{540}+\irrep[1]{735}$\\
$\irrepbar{84}\times\irrep{7}$ & = & $\irrepbar{28}+\irrepbar{560}$\\
$\irrep{84}\times\irrep{7}$ & = & $\irrepbar[1]{210}+\irrepbar{378}$\\
$\irrepbar{84}\times\irrep{21}$ & = & $\irrepbar{189}+\irrepbar{1575}$\\
$\irrep{84}\times\irrep{21}$ & = & $\irrepbar{756}+\irrepbar{1008}$\\
$\irrepbar{84}\times\irrep{28}$ & = & $\irrepbar{7}+\irrepbar{189}+\irrepbar{2156}$\\
$\irrep{84}\times\irrep{28}$ & = & $\irrepbar{462}+\irrepbar{882}+\irrepbar{1008}$\\
$\irrepbar{84}\times\irrep{35}$ & = & $\irrepbar{540}+\irrepbar{2400}$\\
$\irrep{84}\times\irrep{35}$ & = & $\irrepbar{840}+\irrepbar{2100}$\\
$\irrep{84}\times\irrep{48}$ & = & $\irrep{84}+\irrep{112}+\irrep{1386}+\irrep{2450}$\\
$\irrep{84}\times\irrep{84}$ & = & $\irrepbar{924}+\irrepbar{1176}+\irrepbar{2310}+\irrepbar[1]{2646}$\\
$\irrepbar{112}\times\irrep{7}$ & = & $\irrepbar{21}+\irrepbar{28}+\irrepbar{735}$\\
$\irrep{112}\times\irrep{7}$ & = & $\irrepbar{196}+\irrepbar{210}+\irrepbar{378}$\\
$\irrepbar{112}\times\irrep{21}$ & = & $\irrepbar{7}+\irrepbar{140}+\irrepbar{189}+\irrepbar{2016}$\\
$\irrep{112}\times\irrep{21}$ & = & $\irrepbar{224}+\irrepbar{490}+\irrepbar{756}+\irrepbar{882}$\\
$\irrepbar{112}\times\irrep{28}$ & = & $\irrepbar{7}+\irrepbar{140}+\irrepbar{189}+\irrepbar{2800}$\\
$\irrep{112}\times\irrep{28}$ & = & $\irrepbar{490}+\irrepbar{756}+\irrepbar{882}+\irrepbar{1008}$\\
$\irrepbar{112}\times\irrep{35}$ & = & $\irrep{48}+\irrep{392}+\irrepbar{540}+\irrepbar{2940}$\\
$\irrep{112}\times\irrep{35}$ & = & $\irrepbar{140}+\irrepbar{588}+\irrepbar{840}+\irrepbar{2352}$\\
$\irrep{112}\times\irrep{48}$ & = & $\irrep{35}+\irrep{84}+2(\irrep{112})+\irrep{1260}+\irrep{1323}+\irrep{2450}$\\
$\irrep{112}\times\irrep{84}$ & = & $\irrepbar{2100}+\irrepbar{2310}+\irrepbar{2352}+\irrepbar[1]{2646}$\\
$\irrep{112}\times\irrep{112}$ & = & $\irrepbar[1]{490}+\irrepbar{588}+\irrepbar{840}+\irrepbar{1176}+\irrepbar{2100}+2(\irrepbar{2352})+\irrepbar[1]{2646}$\\
\end{longtable}
\newpage
\begin{longtable}{rcl}
\caption{\label{tab:SU8TensorProducts}SU(8) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SU(8) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrepbar{8}\times\irrep{8}$ & = & $\irrep{1}+\irrep{63}$\\
$\irrep{8}\times\irrep{8}$ & = & $\irrep{28}+\irrep{36}$\\
$\irrepbar{28}\times\irrep{8}$ & = & $\irrepbar{8}+\irrepbar{216}$\\
$\irrep{28}\times\irrep{8}$ & = & $\irrep{56}+\irrep{168}$\\
$\irrepbar{28}\times\irrep{28}$ & = & $\irrep{1}+\irrep{63}+\irrep{720}$\\
$\irrep{28}\times\irrep{28}$ & = & $\irrep{70}+\irrep{336}+\irrep{378}$\\
$\irrepbar{36}\times\irrep{8}$ & = & $\irrepbar{8}+\irrepbar{280}$\\
$\irrep{36}\times\irrep{8}$ & = & $\irrep{120}+\irrep{168}$\\
$\irrepbar{36}\times\irrep{28}$ & = & $\irrep{63}+\irrepbar{945}$\\
$\irrep{36}\times\irrep{28}$ & = & $\irrep{378}+\irrep{630}$\\
$\irrepbar{36}\times\irrep{36}$ & = & $\irrep{1}+\irrep{63}+\irrep{1232}$\\
$\irrep{36}\times\irrep{36}$ & = & $\irrep{330}+\irrep{336}+\irrep{630}$\\
$\irrepbar{56}\times\irrep{8}$ & = & $\irrepbar{28}+\irrepbar{420}$\\
$\irrep{56}\times\irrep{8}$ & = & $\irrep{70}+\irrep{378}$\\
$\irrepbar{56}\times\irrep{28}$ & = & $\irrepbar{8}+\irrepbar{216}+\irrepbar{1344}$\\
$\irrep{56}\times\irrep{28}$ & = & $\irrepbar{56}+\irrepbar{504}+\irrepbar{1008}$\\
$\irrepbar{56}\times\irrep{36}$ & = & $\irrepbar{216}+\irrepbar{1800}$\\
$\irrep{56}\times\irrep{36}$ & = & $\irrepbar{504}+\irrepbar[1]{1512}$\\
$\irrepbar{56}\times\irrep{56}$ & = & $\irrep{1}+\irrep{63}+\irrep{720}+\irrep{2352}$\\
$\irrep{56}\times\irrep{56}$ & = & $\irrepbar{28}+\irrepbar{420}+\irrepbar{1176}+\irrepbar{1512}$\\
$\irrep{63}\times\irrep{8}$ & = & $\irrep{8}+\irrep{216}+\irrep{280}$\\
$\irrep{63}\times\irrep{28}$ & = & $\irrep{28}+\irrep{36}+\irrep{420}+\irrep{1280}$\\
$\irrep{63}\times\irrep{36}$ & = & $\irrep{28}+\irrep{36}+\irrep{924}+\irrep{1280}$\\
$\irrep{63}\times\irrep{56}$ & = & $\irrep{56}+\irrep{168}+\irrep{504}+\irrep{2800}$\\
$\irrep{63}\times\irrep{63}$ & = & $\irrep{1}+2(\irrep{63})+\irrep{720}+\irrep{945}+\irrepbar{945}+\irrep{1232}$\\
$\irrep{70}\times\irrep{8}$ & = & $\irrepbar{56}+\irrepbar{504}$\\
$\irrep{70}\times\irrep{28}$ & = & $\irrepbar{28}+\irrepbar{420}+\irrepbar{1512}$\\
$\irrep{70}\times\irrep{36}$ & = & $\irrepbar{420}+\irrepbar{2100}$\\
$\irrep{70}\times\irrep{56}$ & = & $\irrepbar{8}+\irrepbar{216}+\irrepbar{1344}+\irrepbar[1]{2352}$\\
$\irrep{70}\times\irrep{63}$ & = & $\irrep{70}+\irrep{378}+\irrepbar{378}+\irrep{3584}$\\
$\irrep{70}\times\irrep{70}$ & = & $\irrep{1}+\irrep{63}+\irrep{720}+\irrep{1764}+\irrep{2352}$\\
$\irrepbar{120}\times\irrep{8}$ & = & $\irrepbar{36}+\irrepbar{924}$\\
$\irrep{120}\times\irrep{8}$ & = & $\irrep{330}+\irrep{630}$\\
$\irrepbar{120}\times\irrep{28}$ & = & $\irrepbar{280}+\irrepbar{3080}$\\
$\irrep{120}\times\irrep{28}$ & = & $\irrepbar[1]{1512}+\irrepbar{1848}$\\
$\irrepbar{120}\times\irrep{36}$ & = & $\irrepbar{8}+\irrepbar{280}+\irrepbar{4032}$\\
$\irrep{120}\times\irrep{36}$ & = & $\irrepbar{792}+\irrepbar{1680}+\irrepbar{1848}$\\
$\irrep{120}\times\irrep{56}$ & = & $\irrepbar{2100}+\irrepbar{4620}$\\
$\irrep{120}\times\irrep{63}$ & = & $\irrep{120}+\irrep{168}+\irrep[2]{2520}+\irrep{4752}$\\
$\irrepbar{168}\times\irrep{8}$ & = & $\irrepbar{28}+\irrepbar{36}+\irrepbar{1280}$\\
$\irrep{168}\times\irrep{8}$ & = & $\irrep{336}+\irrep{378}+\irrep{630}$\\
$\irrepbar{168}\times\irrep{28}$ & = & $\irrepbar{8}+\irrepbar{216}+\irrepbar{280}+\irrepbar{4200}$\\
$\irrep{168}\times\irrep{28}$ & = & $\irrepbar{504}+\irrepbar{1008}+\irrepbar[1]{1512}+\irrepbar{1680}$\\
$\irrep{168}\times\irrep{36}$ & = & $\irrepbar{1008}+\irrepbar[1]{1512}+\irrepbar{1680}+\irrepbar{1848}$\\
$\irrep{168}\times\irrep{56}$ & = & $\irrepbar{420}+\irrepbar{1512}+\irrepbar{2100}+\irrepbar{5376}$\\
$\irrep{168}\times\irrep{63}$ & = & $\irrep{56}+\irrep{120}+2(\irrep{168})+\irrep{2520}+\irrep{2800}+\irrep{4752}$\\
\end{longtable}
\newpage
\begin{longtable}{rcl}
\caption{\label{tab:SU9TensorProducts}SU(9) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SU(9) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrepbar{9}\times\irrep{9}$ & = & $\irrep{1}+\irrep{80}$\\
$\irrep{9}\times\irrep{9}$ & = & $\irrep{36}+\irrep{45}$\\
$\irrepbar{36}\times\irrep{9}$ & = & $\irrepbar{9}+\irrepbar{315}$\\
$\irrep{36}\times\irrep{9}$ & = & $\irrep{84}+\irrep{240}$\\
$\irrepbar{36}\times\irrep{36}$ & = & $\irrep{1}+\irrep{80}+\irrep{1215}$\\
$\irrep{36}\times\irrep{36}$ & = & $\irrep{126}+\irrep{540}+\irrep{630}$\\
$\irrepbar{45}\times\irrep{9}$ & = & $\irrepbar{9}+\irrepbar{396}$\\
$\irrep{45}\times\irrep{9}$ & = & $\irrep{165}+\irrep{240}$\\
$\irrepbar{45}\times\irrep{36}$ & = & $\irrep{80}+\irrepbar{1540}$\\
$\irrep{45}\times\irrep{36}$ & = & $\irrep{630}+\irrep{990}$\\
$\irrepbar{45}\times\irrep{45}$ & = & $\irrep{1}+\irrep{80}+\irrep{1944}$\\
$\irrep{45}\times\irrep{45}$ & = & $\irrep{495}+\irrep{540}+\irrep{990}$\\
$\irrep{80}\times\irrep{9}$ & = & $\irrep{9}+\irrep{315}+\irrep{396}$\\
$\irrep{80}\times\irrep{80}$ & = & $\irrep{1}+2(\irrep{80})+\irrep{1215}+\irrep{1540}+\irrepbar{1540}+\irrep{1944}$\\
$\irrepbar{84}\times\irrep{9}$ & = & $\irrepbar{36}+\irrepbar{720}$\\
$\irrep{84}\times\irrep{9}$ & = & $\irrep{126}+\irrep{630}$\\
$\irrep{84}\times\irrep{36}$ & = & $\irrepbar{126}+\irrepbar{1008}+\irrepbar{1890}$\\
$\irrepbar{126}\times\irrep{9}$ & = & $\irrepbar{84}+\irrepbar{1050}$\\
$\irrep{126}\times\irrep{9}$ & = & $\irrepbar{126}+\irrepbar{1008}$\\
$\irrepbar{165}\times\irrep{9}$ & = & $\irrepbar{45}+\irrepbar{1440}$\\
$\irrep{165}\times\irrep{9}$ & = & $\irrep{495}+\irrep{990}$\\
$\irrep{240}\times\irrep{9}$ & = & $\irrep{540}+\irrep{630}+\irrep{990}$\\
\end{longtable}
\begin{longtable}{rcl}
\caption{\label{tab:SU10TensorProducts}SU(10) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SU(10) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrepbar{10}\times\irrep{10}$ & = & $\irrep{1}+\irrep{99}$\\
$\irrep{10}\times\irrep{10}$ & = & $\irrep{45}+\irrep{55}$\\
$\irrepbar{45}\times\irrep{10}$ & = & $\irrepbar{10}+\irrepbar{440}$\\
$\irrep{45}\times\irrep{10}$ & = & $\irrep{120}+\irrep{330}$\\
$\irrepbar{45}\times\irrep{45}$ & = & $\irrep{1}+\irrep{99}+\irrep{1925}$\\
$\irrep{45}\times\irrep{45}$ & = & $\irrep{210}+\irrep{825}+\irrep{990}$\\
$\irrepbar{55}\times\irrep{10}$ & = & $\irrepbar{10}+\irrepbar{540}$\\
$\irrep{55}\times\irrep{10}$ & = & $\irrep{220}+\irrep{330}$\\
$\irrep{55}\times\irrep{45}$ & = & $\irrep{990}+\irrep{1485}$\\
$\irrep{55}\times\irrep{55}$ & = & $\irrep{715}+\irrep{825}+\irrep{1485}$\\
$\irrep{99}\times\irrep{10}$ & = & $\irrep{10}+\irrep{440}+\irrep{540}$\\
$\irrepbar{120}\times\irrep{10}$ & = & $\irrepbar{45}+\irrepbar{1155}$\\
$\irrep{120}\times\irrep{10}$ & = & $\irrep{210}+\irrep{990}$\\
$\irrepbar{210}\times\irrep{10}$ & = & $\irrepbar{120}+\irrepbar{1980}$\\
$\irrep{210}\times\irrep{10}$ & = & $\irrep{252}+\irrep{1848}$\\
\end{longtable}
\newpage
\begin{longtable}{rcl}
\caption{\label{tab:SU11TensorProducts}SU(11) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SU(11) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrepbar{11}\times\irrep{11}$ & = & $\irrep{1}+\irrep{120}$\\
$\irrep{11}\times\irrep{11}$ & = & $\irrep{55}+\irrep{66}$\\
$\irrepbar{55}\times\irrep{11}$ & = & $\irrepbar{11}+\irrepbar{594}$\\
$\irrep{55}\times\irrep{11}$ & = & $\irrep{165}+\irrep{440}$\\
$\irrepbar{66}\times\irrep{11}$ & = & $\irrepbar{11}+\irrepbar{715}$\\
$\irrep{66}\times\irrep{11}$ & = & $\irrep{286}+\irrep{440}$\\
$\irrep{120}\times\irrep{11}$ & = & $\irrep{11}+\irrep{594}+\irrep{715}$\\
\end{longtable}
\begin{longtable}{rcl}
\caption{\label{tab:SU12TensorProducts}SU(12) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SU(12) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrepbar{12}\times\irrep{12}$ & = & $\irrep{1}+\irrep{143}$\\
$\irrep{12}\times\irrep{12}$ & = & $\irrep{66}+\irrep{78}$\\
$\irrepbar{66}\times\irrep{12}$ & = & $\irrepbar{12}+\irrepbar{780}$\\
$\irrep{66}\times\irrep{12}$ & = & $\irrep{220}+\irrep{572}$\\
$\irrepbar{78}\times\irrep{12}$ & = & $\irrepbar{12}+\irrepbar{924}$\\
$\irrep{78}\times\irrep{12}$ & = & $\irrep{364}+\irrep{572}$\\
$\irrep{143}\times\irrep{12}$ & = & $\irrep{12}+\irrep{780}+\irrep{924}$\\
\end{longtable}
\newpage
\subsubsection{\SO{N}}
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:SO7TensorProducts}SO(7) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SO(7) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrep{7}\times\irrep{7}$ & = & $\irrep{1}+\irrep{21}+\irrep{27}$\\
$\irrep{8}\times\irrep{7}$ & = & $\irrep{8}+\irrep{48}$\\
$\irrep{8}\times\irrep{8}$ & = & $\irrep{1}+\irrep{7}+\irrep{21}+\irrep{35}$\\
$\irrep{21}\times\irrep{7}$ & = & $\irrep{7}+\irrep{35}+\irrep{105}$\\
$\irrep{21}\times\irrep{8}$ & = & $\irrep{8}+\irrep{48}+\irrep{112}$\\
$\irrep{21}\times\irrep{21}$ & = & $\irrep{1}+\irrep{21}+\irrep{27}+\irrep{35}+\irrep[1]{168}+\irrep{189}$\\
$\irrep{27}\times\irrep{7}$ & = & $\irrep{7}+\irrep{77}+\irrep{105}$\\
$\irrep{27}\times\irrep{8}$ & = & $\irrep{48}+\irrep{168}$\\
$\irrep{27}\times\irrep{21}$ & = & $\irrep{21}+\irrep{27}+\irrep{189}+\irrep{330}$\\
$\irrep{27}\times\irrep{27}$ & = & $\irrep{1}+\irrep{21}+\irrep{27}+\irrep[1]{168}+\irrep{182}+\irrep{330}$\\
$\irrep{35}\times\irrep{7}$ & = & $\irrep{21}+\irrep{35}+\irrep{189}$\\
$\irrep{35}\times\irrep{8}$ & = & $\irrep{8}+\irrep{48}+\irrep{112}+\irrep[1]{112}$\\
$\irrep{35}\times\irrep{21}$ & = & $\irrep{7}+\irrep{21}+\irrep{35}+\irrep{105}+\irrep{189}+\irrep{378}$\\
$\irrep{35}\times\irrep{27}$ & = & $\irrep{35}+\irrep{105}+\irrep{189}+\irrep{616}$\\
$\irrep{35}\times\irrep{35}$ & = & $\irrep{1}+\irrep{7}+\irrep{21}+\irrep{27}+\irrep{35}+\irrep{105}+\irrep[1]{168}+\irrep{189}+\irrep{294}+\irrep{378}$\\
$\irrep{48}\times\irrep{7}$ & = & $\irrep{8}+\irrep{48}+\irrep{112}+\irrep{168}$\\
$\irrep{48}\times\irrep{8}$ & = & $\irrep{7}+\irrep{21}+\irrep{27}+\irrep{35}+\irrep{105}+\irrep{189}$\\
$\irrep{48}\times\irrep{21}$ & = & $\irrep{8}+2(\irrep{48})+\irrep{112}+\irrep[1]{112}+\irrep{168}+\irrep{512}$\\
$\irrep{48}\times\irrep{27}$ & = & $\irrep{8}+\irrep{48}+\irrep{112}+\irrep{168}+\irrep{448}+\irrep{512}$\\
$\irrep{48}\times\irrep{35}$ & = & $\irrep{8}+2(\irrep{48})+2(\irrep{112})+\irrep[1]{112}+\irrep{168}+\irrep{512}+\irrep{560}$\\
$\irrep{48}\times\irrep{48}$ & = & $\irrep{1}+\irrep{7}+2(\irrep{21})+\irrep{27}+2(\irrep{35})+\irrep{77}+2(\irrep{105})+\irrep[1]{168}+2(\irrep{189})+\irrep{330}+\irrep{378}+\irrep{616}$\\
$\irrep{77}\times\irrep{7}$ & = & $\irrep{27}+\irrep{182}+\irrep{330}$\\
$\irrep{77}\times\irrep{8}$ & = & $\irrep{168}+\irrep{448}$\\
$\irrep{77}\times\irrep{21}$ & = & $\irrep{77}+\irrep{105}+\irrep{616}+\irrep{819}$\\
$\irrep{77}\times\irrep{27}$ & = & $\irrep{7}+\irrep{77}+\irrep{105}+\irrep[1]{378}+\irrep{693}+\irrep{819}$\\
$\irrep{77}\times\irrep{35}$ & = & $\irrep{189}+\irrep{330}+\irrep{616}+\irrep{1560}$\\
$\irrep{77}\times\irrep{48}$ & = & $\irrep{48}+\irrep{168}+\irrep{448}+\irrep{512}+\irrep[1]{1008}+\irrep{1512}$\\
$\irrep{77}\times\irrep{77}$ & = & $\irrep{1}+\irrep{21}+\irrep{27}+\irrep[1]{168}+\irrep{182}+\irrep{330}+\irrep{714}+\irrep{825}+\irrep{1750}+\irrep{1911}$\\
$\irrep{105}\times\irrep{7}$ & = & $\irrep{21}+\irrep{27}+\irrep[1]{168}+\irrep{189}+\irrep{330}$\\
$\irrep{105}\times\irrep{8}$ & = & $\irrep{48}+\irrep{112}+\irrep{168}+\irrep{512}$\\
$\irrep{105}\times\irrep{21}$ & = & $\irrep{7}+\irrep{35}+\irrep{77}+2(\irrep{105})+\irrep{189}+\irrep{378}+\irrep{616}+\irrep{693}$\\
$\irrep{105}\times\irrep{27}$ & = & $\irrep{7}+\irrep{35}+\irrep{77}+2(\irrep{105})+\irrep{378}+\irrep{616}+\irrep{693}+\irrep{819}$\\
$\irrep{105}\times\irrep{35}$ & = & $\irrep{21}+\irrep{27}+\irrep{35}+\irrep{105}+\irrep[1]{168}+2(\irrep{189})+\irrep{330}+\irrep{378}+\irrep{616}+\irrep{1617}$\\
$\irrep{105}\times\irrep{48}$ & = & $\irrep{8}+2(\irrep{48})+2(\irrep{112})+\irrep[1]{112}+2(\irrep{168})+\irrep{448}+2(\irrep{512})+\irrep{560}+\irrep{720}+\irrep{1512}$\\
$\irrep{105}\times\irrep{77}$ & = & $\irrep{21}+\irrep{27}+\irrep[1]{168}+\irrep{182}+\irrep{189}+2(\irrep{330})+\irrep{1560}+\irrep{1617}+\irrep{1750}+\irrep{1911}$\\
$\irrep{105}\times\irrep{105}$ & = & $\irrep{1}+2(\irrep{21})+2(\irrep{27})+\irrep{35}+2(\irrep[1]{168})+\irrep{182}+3(\irrep{189})+\irrep{294}+3(\irrep{330})+\irrep{378}+\irrep{616}+\irrep{825}+\irrep{1560}+2(\irrep{1617})+\irrep{1911}$\\
$\irrep[1]{112}\times\irrep{7}$ & = & $\irrep{112}+\irrep[1]{112}+\irrep{560}$\\
$\irrep{112}\times\irrep{7}$ & = & $\irrep{48}+\irrep{112}+\irrep[1]{112}+\irrep{512}$\\
$\irrep[1]{112}\times\irrep{8}$ & = & $\irrep{35}+\irrep{189}+\irrep{294}+\irrep{378}$\\
$\irrep{112}\times\irrep{8}$ & = & $\irrep{21}+\irrep{35}+\irrep{105}+\irrep[1]{168}+\irrep{189}+\irrep{378}$\\
$\irrep[1]{112}\times\irrep{21}$ & = & $\irrep{48}+\irrep{112}+\irrep[1]{112}+\irrep{512}+\irrep{560}+\irrep{1008}$\\
$\irrep{112}\times\irrep{21}$ & = & $\irrep{8}+\irrep{48}+2(\irrep{112})+\irrep[1]{112}+\irrep{168}+\irrep{512}+\irrep{560}+\irrep{720}$\\
$\irrep[1]{112}\times\irrep{27}$ & = & $\irrep{112}+\irrep[1]{112}+\irrep{512}+\irrep{560}+\irrep{1728}$\\
$\irrep{112}\times\irrep{27}$ & = & $\irrep{48}+\irrep{112}+\irrep[1]{112}+\irrep{168}+\irrep{512}+\irrep{560}+\irrep{1512}$\\
$\irrep[1]{112}\times\irrep{35}$ & = & $\irrep{8}+\irrep{48}+\irrep{112}+\irrep[1]{112}+\irrep{168}+\irrep{512}+\irrep{560}+\irrep{672}+\irrep{720}+\irrep{1008}$\\
$\irrep{112}\times\irrep{35}$ & = & $\irrep{8}+2(\irrep{48})+2(\irrep{112})+\irrep[1]{112}+\irrep{168}+2(\irrep{512})+\irrep{560}+\irrep{720}+\irrep{1008}$\\
$\irrep[1]{112}\times\irrep{48}$ & = & $\irrep{21}+\irrep{35}+\irrep{105}+\irrep[1]{168}+2(\irrep{189})+\irrep{294}+2(\irrep{378})+\irrep{616}+\irrep{1386}+\irrep{1617}$\\
$\irrep{112}\times\irrep{48}$ & = & $\irrep{7}+\irrep{21}+\irrep{27}+2(\irrep{35})+2(\irrep{105})+\irrep[1]{168}+3(\irrep{189})+\irrep{294}+\irrep{330}+2(\irrep{378})+\irrep{616}+\irrep{693}+\irrep{1617}$\\
$\irrep[1]{112}\times\irrep{77}$ & = & $\irrep[1]{112}+\irrep{512}+\irrep{560}+\irrep{1512}+\irrep{1728}+\irrep{4200}$\\
$\irrep{112}\times\irrep{77}$ & = & $\irrep{112}+\irrep{168}+\irrep{448}+\irrep{512}+\irrep{560}+\irrep{1512}+\irrep{1728}+\irrep{3584}$\\
$\irrep[1]{112}\times\irrep{105}$ & = & $\irrep{48}+2(\irrep{112})+\irrep[1]{112}+\irrep{168}+2(\irrep{512})+2(\irrep{560})+\irrep{720}+\irrep{1008}+\irrep{1512}+\irrep{1728}+\irrep{4096}$\\
$\irrep{112}\times\irrep{105}$ & = & $\irrep{8}+2(\irrep{48})+2(\irrep{112})+2(\irrep[1]{112})+2(\irrep{168})+\irrep{448}+3(\irrep{512})+2(\irrep{560})+\irrep{720}+\irrep{1008}+\irrep{1512}+\irrep{1728}+\irrep{2800}$\\
$\irrep[1]{112}\times\irrep[1]{112}$ & = & $\irrep{1}+\irrep{7}+\irrep{21}+\irrep{27}+\irrep{35}+\irrep{77}+\irrep{105}+\irrep[1]{168}+\irrep{189}+\irrep{294}+\irrep{330}+\irrep{378}+\irrep{616}+\irrep{693}+\irrep{825}+\irrep{1386}+\irrep[1]{1386}+\irrep{1617}+\irrep{2079}+\irrep{2310}$\\
$\irrep[1]{112}\times\irrep{112}$ & = & $\irrep{7}+\irrep{21}+\irrep{27}+\irrep{35}+2(\irrep{105})+\irrep[1]{168}+2(\irrep{189})+\irrep{294}+\irrep{330}+2(\irrep{378})+\irrep{616}+\irrep{693}+\irrep{1386}+2(\irrep{1617})+\irrep{2079}+\irrep{2310}$\\
$\irrep{112}\times\irrep{112}$ & = & $\irrep{1}+\irrep{7}+2(\irrep{21})+\irrep{27}+2(\irrep{35})+\irrep{77}+2(\irrep{105})+2(\irrep[1]{168})+3(\irrep{189})+\irrep{294}+\irrep{330}+3(\irrep{378})+2(\irrep{616})+\irrep{693}+\irrep{825}+\irrep{1386}+2(\irrep{1617})+\irrep{2079}$\\
$\irrep[1]{168}\times\irrep{7}$ & = & $\irrep{105}+\irrep{378}+\irrep{693}$\\
$\irrep{168}\times\irrep{7}$ & = & $\irrep{48}+\irrep{168}+\irrep{448}+\irrep{512}$\\
$\irrep[1]{168}\times\irrep{8}$ & = & $\irrep{112}+\irrep{512}+\irrep{720}$\\
$\irrep{168}\times\irrep{8}$ & = & $\irrep{27}+\irrep{77}+\irrep{105}+\irrep{189}+\irrep{330}+\irrep{616}$\\
$\irrep[1]{168}\times\irrep{21}$ & = & $\irrep{21}+\irrep[1]{168}+\irrep{189}+\irrep{330}+\irrep{378}+\irrep{825}+\irrep{1617}$\\
$\irrep{168}\times\irrep{21}$ & = & $\irrep{48}+\irrep{112}+2(\irrep{168})+\irrep{448}+\irrep{512}+\irrep{560}+\irrep{1512}$\\
$\irrep[1]{168}\times\irrep{27}$ & = & $\irrep{27}+\irrep[1]{168}+\irrep{189}+\irrep{294}+\irrep{330}+\irrep{1617}+\irrep{1911}$\\
$\irrep{168}\times\irrep{27}$ & = & $\irrep{8}+\irrep{48}+\irrep{112}+\irrep{168}+\irrep{448}+\irrep{512}+\irrep{720}+\irrep[1]{1008}+\irrep{1512}$\\
$\irrep[1]{168}\times\irrep{35}$ & = & $\irrep{35}+\irrep{105}+\irrep[1]{168}+\irrep{189}+\irrep{378}+\irrep{616}+\irrep{693}+\irrep{1617}+\irrep{2079}$\\
$\irrep{168}\times\irrep{35}$ & = & $\irrep{48}+\irrep{112}+\irrep[1]{112}+2(\irrep{168})+\irrep{448}+2(\irrep{512})+\irrep{560}+\irrep{1512}+\irrep{1728}$\\
$\irrep[1]{168}\times\irrep{48}$ & = & $\irrep{48}+\irrep{112}+\irrep[1]{112}+\irrep{168}+2(\irrep{512})+\irrep{560}+\irrep{720}+\irrep{1008}+\irrep{1512}+\irrep{2800}$\\
$\irrep{168}\times\irrep{48}$ & = & $\irrep{7}+\irrep{21}+\irrep{27}+\irrep{35}+\irrep{77}+2(\irrep{105})+\irrep[1]{168}+\irrep{182}+2(\irrep{189})+2(\irrep{330})+\irrep{378}+2(\irrep{616})+\irrep{693}+\irrep{819}+\irrep{1560}+\irrep{1617}$\\
$\irrep[1]{168}\times\irrep{77}$ & = & $\irrep{77}+\irrep{105}+\irrep{378}+\irrep{616}+\irrep{693}+\irrep{819}+\irrep{1386}+\irrep{4312}+\irrep{4550}$\\
$\irrep{168}\times\irrep{77}$ & = & $\irrep{8}+\irrep{48}+\irrep{112}+\irrep{168}+\irrep{448}+\irrep{512}+\irrep{720}+\irrep[1]{1008}+\irrep{1512}+\irrep{2016}+\irrep{2800}+\irrep{3584}$\\
$\irrep[1]{168}\times\irrep{105}$ & = & $\irrep{7}+\irrep{35}+\irrep{77}+2(\irrep{105})+\irrep{189}+\irrep{294}+2(\irrep{378})+2(\irrep{616})+2(\irrep{693})+\irrep{819}+\irrep{1386}+\irrep{1617}+\irrep{2079}+\irrep{3003}+\irrep{4550}$\\
$\irrep{168}\times\irrep{105}$ & = & $\irrep{8}+2(\irrep{48})+2(\irrep{112})+\irrep[1]{112}+2(\irrep{168})+2(\irrep{448})+3(\irrep{512})+\irrep{560}+\irrep{720}+\irrep{1008}+\irrep[1]{1008}+2(\irrep{1512})+\irrep{1728}+\irrep{2800}+\irrep{3584}$\\
$\irrep[1]{168}\times\irrep[1]{112}$ & = & $\irrep{48}+\irrep{112}+\irrep[1]{112}+\irrep{168}+2(\irrep{512})+\irrep{560}+\irrep{720}+\irrep{1008}+\irrep{1512}+\irrep{1728}+\irrep{2800}+\irrep{4096}+\irrep{4928}$\\
$\irrep[1]{168}\times\irrep{112}$ & = & $\irrep{8}+\irrep{48}+2(\irrep{112})+\irrep[1]{112}+\irrep{168}+\irrep{448}+2(\irrep{512})+2(\irrep{560})+2(\irrep{720})+\irrep{1008}+\irrep{1512}+\irrep{1728}+\irrep{2800}+\irrep{3080}+\irrep{4096}$\\
$\irrep{168}\times\irrep[1]{112}$ & = & $\irrep{35}+\irrep{105}+\irrep[1]{168}+2(\irrep{189})+\irrep{294}+\irrep{330}+2(\irrep{378})+2(\irrep{616})+\irrep{693}+\irrep{1386}+\irrep{1560}+2(\irrep{1617})+\irrep{4095}+\irrep{4550}$\\
$\irrep{168}\times\irrep{112}$ & = & $\irrep{21}+\irrep{27}+\irrep{35}+\irrep{77}+2(\irrep{105})+\irrep[1]{168}+3(\irrep{189})+\irrep{294}+2(\irrep{330})+2(\irrep{378})+3(\irrep{616})+\irrep{693}+\irrep{819}+\irrep{1386}+\irrep{1560}+2(\irrep{1617})+\irrep{1911}+\irrep{4550}$\\
$\irrep{168}\times\irrep{168}$ & = & $\irrep{1}+\irrep{7}+2(\irrep{21})+\irrep{27}+2(\irrep{35})+\irrep{77}+2(\irrep{105})+2(\irrep[1]{168})+\irrep{182}+2(\irrep{189})+2(\irrep{330})+2(\irrep{378})+\irrep[1]{378}+2(\irrep{616})+2(\irrep{693})+2(\irrep{819})+\irrep{825}+2(\irrep{1560})+2(\irrep{1617})+\irrep{1750}+\irrep{1911}+\irrep{2079}+\irrep{3375}+\irrep{4550}$\\
\end{longtable}
\newpage
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:SO8TensorProducts}SO(8) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SO(8) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrepsub{8}{s}\times\irrepsub{8}{s}$ & = & $\irrep{1}+\irrep{28}+\irrepsub{35}{s}$\\
$\irrepsub{8}{c}\times\irrepsub{8}{s}$ & = & $\irrepsub{8}{v}+\irrepsub{56}{v}$\\
$\irrepsub{8}{c}\times\irrepsub{8}{v}$ & = & $\irrepsub{8}{s}+\irrepsub{56}{s}$\\
$\irrepsub{8}{v}\times\irrepsub{8}{s}$ & = & $\irrepsub{8}{c}+\irrepsub{56}{c}$\\
$\irrepsub{8}{v}\times\irrepsub{8}{v}$ & = & $\irrep{1}+\irrep{28}+\irrepsub{35}{v}$\\
$\irrep{28}\times\irrepsub{8}{s}$ & = & $\irrepsub{8}{s}+\irrepsub{56}{s}+\irrepsub{160}{s}$\\
$\irrep{28}\times\irrepsub{8}{v}$ & = & $\irrepsub{8}{v}+\irrepsub{56}{v}+\irrepsub{160}{v}$\\
$\irrep{28}\times\irrep{28}$ & = & $\irrep{1}+\irrep{28}+\irrepsub{35}{v}+\irrepsub{35}{c}+\irrepsub{35}{s}+\irrep{300}+\irrep{350}$\\
$\irrepsub{35}{s}\times\irrepsub{8}{s}$ & = & $\irrepsub{8}{s}+\irrepsub{112}{s}+\irrepsub{160}{s}$\\
$\irrepsub{35}{s}\times\irrepsub{8}{v}$ & = & $\irrepsub{56}{v}+\irrepsub{224}{sv}$\\
$\irrepsub{35}{c}\times\irrepsub{8}{s}$ & = & $\irrepsub{56}{s}+\irrepsub{224}{cs}$\\
$\irrepsub{35}{c}\times\irrepsub{8}{v}$ & = & $\irrepsub{56}{v}+\irrepsub{224}{cv}$\\
$\irrepsub{35}{v}\times\irrepsub{8}{s}$ & = & $\irrepsub{56}{s}+\irrepsub{224}{vs}$\\
$\irrepsub{35}{v}\times\irrepsub{8}{c}$ & = & $\irrepsub{56}{c}+\irrepsub{224}{vc}$\\
$\irrepsub{35}{v}\times\irrepsub{8}{v}$ & = & $\irrepsub{8}{v}+\irrepsub{112}{v}+\irrepsub{160}{v}$\\
$\irrepsub{35}{c}\times\irrep{28}$ & = & $\irrep{28}+\irrepsub{35}{c}+\irrep{350}+\irrepsub{567}{c}$\\
$\irrepsub{35}{v}\times\irrep{28}$ & = & $\irrep{28}+\irrepsub{35}{v}+\irrep{350}+\irrepsub{567}{v}$\\
$\irrepsub{35}{s}\times\irrepsub{35}{c}$ & = & $\irrepsub{35}{v}+\irrep{350}+\irrepsub[1]{840}{v}$\\
$\irrepsub{35}{s}\times\irrepsub{35}{v}$ & = & $\irrepsub{35}{c}+\irrep{350}+\irrepsub[1]{840}{c}$\\
$\irrepsub{35}{c}\times\irrepsub{35}{c}$ & = & $\irrep{1}+\irrep{28}+\irrepsub{35}{c}+\irrepsub{294}{c}+\irrep{300}+\irrepsub{567}{c}$\\
$\irrepsub{35}{c}\times\irrepsub{35}{v}$ & = & $\irrepsub{35}{s}+\irrep{350}+\irrepsub[1]{840}{s}$\\
$\irrepsub{35}{v}\times\irrepsub{35}{v}$ & = & $\irrep{1}+\irrep{28}+\irrepsub{35}{v}+\irrepsub{294}{v}+\irrep{300}+\irrepsub{567}{v}$\\
$\irrepsub{56}{v}\times\irrepsub{8}{s}$ & = & $\irrepsub{8}{c}+\irrepsub{56}{c}+\irrepsub{160}{c}+\irrepsub{224}{sc}$\\
$\irrepsub{56}{v}\times\irrepsub{8}{v}$ & = & $\irrep{28}+\irrepsub{35}{c}+\irrepsub{35}{s}+\irrep{350}$\\
$\irrepsub{56}{c}\times\irrepsub{8}{s}$ & = & $\irrepsub{8}{v}+\irrepsub{56}{v}+\irrepsub{160}{v}+\irrepsub{224}{sv}$\\
$\irrepsub{56}{c}\times\irrepsub{8}{v}$ & = & $\irrepsub{8}{s}+\irrepsub{56}{s}+\irrepsub{160}{s}+\irrepsub{224}{vs}$\\
$\irrepsub{56}{s}\times\irrepsub{8}{s}$ & = & $\irrep{28}+\irrepsub{35}{v}+\irrepsub{35}{c}+\irrep{350}$\\
$\irrepsub{56}{s}\times\irrepsub{8}{c}$ & = & $\irrepsub{8}{v}+\irrepsub{56}{v}+\irrepsub{160}{v}+\irrepsub{224}{cv}$\\
$\irrepsub{56}{s}\times\irrepsub{8}{v}$ & = & $\irrepsub{8}{c}+\irrepsub{56}{c}+\irrepsub{160}{c}+\irrepsub{224}{vc}$\\
$\irrepsub{56}{c}\times\irrep{28}$ & = & $\irrepsub{8}{c}+2(\irrepsub{56}{c})+\irrepsub{160}{c}+\irrepsub{224}{vc}+\irrepsub{224}{sc}+\irrepsub{840}{c}$\\
$\irrepsub{56}{s}\times\irrep{28}$ & = & $\irrepsub{8}{s}+2(\irrepsub{56}{s})+\irrepsub{160}{s}+\irrepsub{224}{vs}+\irrepsub{224}{cs}+\irrepsub{840}{s}$\\
$\irrepsub{56}{v}\times\irrepsub{35}{c}$ & = & $\irrepsub{8}{v}+\irrepsub{56}{v}+\irrepsub{160}{v}+\irrepsub{224}{cv}+\irrepsub{672}{cs}+\irrepsub{840}{v}$\\
$\irrepsub{56}{v}\times\irrepsub{35}{v}$ & = & $\irrepsub{56}{v}+\irrepsub{160}{v}+\irrepsub{224}{cv}+\irrepsub{224}{sv}+\irrepsub{1296}{v}$\\
$\irrepsub{56}{c}\times\irrepsub{35}{c}$ & = & $\irrepsub{56}{c}+\irrepsub{160}{c}+\irrepsub{224}{vc}+\irrepsub{224}{sc}+\irrepsub{1296}{c}$\\
$\irrepsub{56}{c}\times\irrepsub{35}{v}$ & = & $\irrepsub{8}{c}+\irrepsub{56}{c}+\irrepsub{160}{c}+\irrepsub{224}{vc}+\irrepsub{672}{vs}+\irrepsub{840}{c}$\\
$\irrepsub{56}{s}\times\irrepsub{35}{s}$ & = & $\irrepsub{56}{s}+\irrepsub{160}{s}+\irrepsub{224}{vs}+\irrepsub{224}{cs}+\irrepsub{1296}{s}$\\
$\irrepsub{56}{s}\times\irrepsub{35}{c}$ & = & $\irrepsub{8}{s}+\irrepsub{56}{s}+\irrepsub{160}{s}+\irrepsub{224}{cs}+\irrepsub{672}{cv}+\irrepsub{840}{s}$\\
$\irrepsub{56}{s}\times\irrepsub{35}{v}$ & = & $\irrepsub{8}{s}+\irrepsub{56}{s}+\irrepsub{160}{s}+\irrepsub{224}{vs}+\irrepsub{672}{vc}+\irrepsub{840}{s}$\\
$\irrepsub{56}{v}\times\irrepsub{56}{s}$ & = & $\irrepsub{8}{c}+2(\irrepsub{56}{c})+\irrepsub{112}{c}+2(\irrepsub{160}{c})+\irrepsub{224}{vc}+\irrepsub{224}{sc}+\irrepsub{840}{c}+\irrepsub{1296}{c}$\\
$\irrepsub{56}{c}\times\irrepsub{56}{c}$ & = & $\irrep{1}+2(\irrep{28})+\irrepsub{35}{v}+\irrepsub{35}{c}+\irrepsub{35}{s}+\irrep{300}+2(\irrep{350})+\irrepsub{567}{v}+\irrepsub{567}{s}+\irrepsub[1]{840}{c}$\\
$\irrepsub{56}{c}\times\irrepsub{56}{s}$ & = & $\irrepsub{8}{v}+2(\irrepsub{56}{v})+\irrepsub{112}{v}+2(\irrepsub{160}{v})+\irrepsub{224}{cv}+\irrepsub{224}{sv}+\irrepsub{840}{v}+\irrepsub{1296}{v}$\\
$\irrepsub{56}{s}\times\irrepsub{56}{s}$ & = & $\irrep{1}+2(\irrep{28})+\irrepsub{35}{v}+\irrepsub{35}{c}+\irrepsub{35}{s}+\irrep{300}+2(\irrep{350})+\irrepsub{567}{v}+\irrepsub{567}{c}+\irrepsub[1]{840}{s}$\\
$\irrepsub{112}{s}\times\irrepsub{8}{s}$ & = & $\irrepsub{35}{s}+\irrepsub{294}{s}+\irrepsub{567}{s}$\\
$\irrepsub{112}{s}\times\irrepsub{8}{v}$ & = & $\irrepsub{224}{sc}+\irrepsub{672}{sv}$\\
$\irrepsub{112}{c}\times\irrepsub{8}{s}$ & = & $\irrepsub{224}{cv}+\irrepsub{672}{cs}$\\
$\irrepsub{112}{c}\times\irrepsub{8}{v}$ & = & $\irrepsub{224}{cs}+\irrepsub{672}{cv}$\\
$\irrepsub{112}{v}\times\irrepsub{8}{s}$ & = & $\irrepsub{224}{vc}+\irrepsub{672}{vs}$\\
$\irrepsub{112}{v}\times\irrepsub{8}{c}$ & = & $\irrepsub{224}{vs}+\irrepsub{672}{vc}$\\
$\irrepsub{112}{v}\times\irrepsub{8}{v}$ & = & $\irrepsub{35}{v}+\irrepsub{294}{v}+\irrepsub{567}{v}$\\
$\irrepsub{112}{s}\times\irrep{28}$ & = & $\irrepsub{112}{s}+\irrepsub{160}{s}+\irrepsub{1296}{s}+\irrepsub{1568}{s}$\\
$\irrepsub{112}{v}\times\irrep{28}$ & = & $\irrepsub{112}{v}+\irrepsub{160}{v}+\irrepsub{1296}{v}+\irrepsub{1568}{v}$\\
$\irrepsub{112}{s}\times\irrepsub{35}{c}$ & = & $\irrepsub{224}{vs}+\irrepsub{1296}{s}+\irrepsub{2400}{sc}$\\
$\irrepsub{112}{s}\times\irrepsub{35}{v}$ & = & $\irrepsub{224}{cs}+\irrepsub{1296}{s}+\irrepsub{2400}{sv}$\\
$\irrepsub{112}{c}\times\irrepsub{35}{c}$ & = & $\irrepsub{8}{c}+\irrepsub{112}{c}+\irrepsub{160}{c}+\irrepsub[1]{672}{c}+\irrepsub{1400}{c}+\irrepsub{1568}{c}$\\
$\irrepsub{112}{c}\times\irrepsub{35}{v}$ & = & $\irrepsub{224}{sc}+\irrepsub{1296}{c}+\irrepsub{2400}{cv}$\\
$\irrepsub{112}{v}\times\irrepsub{35}{s}$ & = & $\irrepsub{224}{cv}+\irrepsub{1296}{v}+\irrepsub{2400}{vs}$\\
$\irrepsub{112}{v}\times\irrepsub{35}{c}$ & = & $\irrepsub{224}{sv}+\irrepsub{1296}{v}+\irrepsub{2400}{vc}$\\
$\irrepsub{112}{v}\times\irrepsub{35}{v}$ & = & $\irrepsub{8}{v}+\irrepsub{112}{v}+\irrepsub{160}{v}+\irrepsub[1]{672}{v}+\irrepsub{1400}{v}+\irrepsub{1568}{v}$\\
$\irrepsub{112}{s}\times\irrepsub{56}{s}$ & = & $\irrep{350}+\irrepsub{567}{s}+\irrepsub[1]{840}{c}+\irrepsub[1]{840}{v}+\irrepsub{3675}{s}$\\
$\irrepsub{112}{c}\times\irrepsub{56}{c}$ & = & $\irrep{350}+\irrepsub{567}{c}+\irrepsub[1]{840}{s}+\irrepsub[1]{840}{v}+\irrepsub{3675}{c}$\\
$\irrepsub{112}{c}\times\irrepsub{56}{s}$ & = & $\irrepsub{56}{v}+\irrepsub{224}{cv}+\irrepsub{672}{cs}+\irrepsub{840}{v}+\irrepsub{1680}{cv}+\irrepsub{2800}{cv}$\\
$\irrepsub{112}{v}\times\irrepsub{56}{v}$ & = & $\irrep{350}+\irrepsub{567}{v}+\irrepsub[1]{840}{s}+\irrepsub[1]{840}{c}+\irrepsub{3675}{v}$\\
$\irrepsub{112}{v}\times\irrepsub{56}{c}$ & = & $\irrepsub{56}{s}+\irrepsub{224}{vs}+\irrepsub{672}{vc}+\irrepsub{840}{s}+\irrepsub{1680}{vs}+\irrepsub{2800}{vs}$\\
$\irrepsub{112}{v}\times\irrepsub{56}{s}$ & = & $\irrepsub{56}{c}+\irrepsub{224}{vc}+\irrepsub{672}{vs}+\irrepsub{840}{c}+\irrepsub{1680}{vc}+\irrepsub{2800}{vc}$\\
$\irrepsub{112}{s}\times\irrepsub{112}{s}$ & = & $\irrep{1}+\irrep{28}+\irrepsub{35}{s}+\irrepsub{294}{s}+\irrep{300}+\irrepsub{567}{s}+\irrepsub{1386}{s}+\irrep{1925}+\irrepsub{3696}{s}+\irrepsub{4312}{s}$\\
$\irrepsub{112}{v}\times\irrepsub{112}{v}$ & = & $\irrep{1}+\irrep{28}+\irrepsub{35}{v}+\irrepsub{294}{v}+\irrep{300}+\irrepsub{567}{v}+\irrepsub{1386}{v}+\irrep{1925}+\irrepsub{3696}{v}+\irrepsub{4312}{v}$\\
$\irrepsub{160}{s}\times\irrepsub{8}{s}$ & = & $\irrep{28}+\irrepsub{35}{s}+\irrep{300}+\irrep{350}+\irrepsub{567}{s}$\\
$\irrepsub{160}{s}\times\irrepsub{8}{v}$ & = & $\irrepsub{56}{c}+\irrepsub{160}{c}+\irrepsub{224}{sc}+\irrepsub{840}{c}$\\
$\irrepsub{160}{c}\times\irrepsub{8}{s}$ & = & $\irrepsub{56}{v}+\irrepsub{160}{v}+\irrepsub{224}{cv}+\irrepsub{840}{v}$\\
$\irrepsub{160}{c}\times\irrepsub{8}{v}$ & = & $\irrepsub{56}{s}+\irrepsub{160}{s}+\irrepsub{224}{cs}+\irrepsub{840}{s}$\\
$\irrepsub{160}{v}\times\irrepsub{8}{s}$ & = & $\irrepsub{56}{c}+\irrepsub{160}{c}+\irrepsub{224}{vc}+\irrepsub{840}{c}$\\
$\irrepsub{160}{v}\times\irrepsub{8}{c}$ & = & $\irrepsub{56}{s}+\irrepsub{160}{s}+\irrepsub{224}{vs}+\irrepsub{840}{s}$\\
$\irrepsub{160}{v}\times\irrepsub{8}{v}$ & = & $\irrep{28}+\irrepsub{35}{v}+\irrep{300}+\irrep{350}+\irrepsub{567}{v}$\\
$\irrepsub{160}{s}\times\irrep{28}$ & = & $\irrepsub{8}{s}+\irrepsub{56}{s}+\irrepsub{112}{s}+2(\irrepsub{160}{s})+\irrepsub{224}{vs}+\irrepsub{224}{cs}+\irrepsub{840}{s}+\irrepsub{1296}{s}+\irrepsub{1400}{s}$\\
$\irrepsub{160}{v}\times\irrep{28}$ & = & $\irrepsub{8}{v}+\irrepsub{56}{v}+\irrepsub{112}{v}+2(\irrepsub{160}{v})+\irrepsub{224}{cv}+\irrepsub{224}{sv}+\irrepsub{840}{v}+\irrepsub{1296}{v}+\irrepsub{1400}{v}$\\
$\irrepsub{160}{s}\times\irrepsub{35}{c}$ & = & $\irrepsub{56}{s}+\irrepsub{160}{s}+\irrepsub{224}{vs}+\irrepsub{224}{cs}+\irrepsub{840}{s}+\irrepsub{1296}{s}+\irrepsub{2800}{cs}$\\
$\irrepsub{160}{s}\times\irrepsub{35}{v}$ & = & $\irrepsub{56}{s}+\irrepsub{160}{s}+\irrepsub{224}{vs}+\irrepsub{224}{cs}+\irrepsub{840}{s}+\irrepsub{1296}{s}+\irrepsub{2800}{vs}$\\
$\irrepsub{160}{c}\times\irrepsub{35}{c}$ & = & $\irrepsub{8}{c}+\irrepsub{56}{c}+\irrepsub{112}{c}+2(\irrepsub{160}{c})+\irrepsub{840}{c}+\irrepsub{1296}{c}+\irrepsub{1400}{c}+\irrepsub{1568}{c}$\\
$\irrepsub{160}{c}\times\irrepsub{35}{v}$ & = & $\irrepsub{56}{c}+\irrepsub{160}{c}+\irrepsub{224}{vc}+\irrepsub{224}{sc}+\irrepsub{840}{c}+\irrepsub{1296}{c}+\irrepsub{2800}{vc}$\\
$\irrepsub{160}{v}\times\irrepsub{35}{s}$ & = & $\irrepsub{56}{v}+\irrepsub{160}{v}+\irrepsub{224}{cv}+\irrepsub{224}{sv}+\irrepsub{840}{v}+\irrepsub{1296}{v}+\irrepsub{2800}{sv}$\\
$\irrepsub{160}{v}\times\irrepsub{35}{c}$ & = & $\irrepsub{56}{v}+\irrepsub{160}{v}+\irrepsub{224}{cv}+\irrepsub{224}{sv}+\irrepsub{840}{v}+\irrepsub{1296}{v}+\irrepsub{2800}{cv}$\\
$\irrepsub{160}{v}\times\irrepsub{35}{v}$ & = & $\irrepsub{8}{v}+\irrepsub{56}{v}+\irrepsub{112}{v}+2(\irrepsub{160}{v})+\irrepsub{840}{v}+\irrepsub{1296}{v}+\irrepsub{1400}{v}+\irrepsub{1568}{v}$\\
$\irrepsub{160}{s}\times\irrepsub{56}{s}$ & = & $\irrep{28}+\irrepsub{35}{v}+\irrepsub{35}{c}+\irrepsub{35}{s}+\irrep{300}+3(\irrep{350})+\irrepsub{567}{v}+\irrepsub{567}{c}+\irrepsub{567}{s}+\irrepsub[1]{840}{c}+\irrepsub[1]{840}{v}+\irrep{4096}$\\
$\irrepsub{160}{c}\times\irrepsub{56}{c}$ & = & $\irrep{28}+\irrepsub{35}{v}+\irrepsub{35}{c}+\irrepsub{35}{s}+\irrep{300}+3(\irrep{350})+\irrepsub{567}{v}+\irrepsub{567}{c}+\irrepsub{567}{s}+\irrepsub[1]{840}{s}+\irrepsub[1]{840}{v}+\irrep{4096}$\\
$\irrepsub{160}{c}\times\irrepsub{56}{s}$ & = & $\irrepsub{8}{v}+2(\irrepsub{56}{v})+2(\irrepsub{160}{v})+2(\irrepsub{224}{cv})+\irrepsub{224}{sv}+\irrepsub{672}{cs}+2(\irrepsub{840}{v})+\irrepsub{1296}{v}+\irrepsub{1400}{v}+\irrepsub{2800}{cv}$\\
$\irrepsub{160}{v}\times\irrepsub{56}{v}$ & = & $\irrep{28}+\irrepsub{35}{v}+\irrepsub{35}{c}+\irrepsub{35}{s}+\irrep{300}+3(\irrep{350})+\irrepsub{567}{v}+\irrepsub{567}{c}+\irrepsub{567}{s}+\irrepsub[1]{840}{s}+\irrepsub[1]{840}{c}+\irrep{4096}$\\
$\irrepsub{160}{v}\times\irrepsub{56}{c}$ & = & $\irrepsub{8}{s}+2(\irrepsub{56}{s})+2(\irrepsub{160}{s})+2(\irrepsub{224}{vs})+\irrepsub{224}{cs}+\irrepsub{672}{vc}+2(\irrepsub{840}{s})+\irrepsub{1296}{s}+\irrepsub{1400}{s}+\irrepsub{2800}{vs}$\\
$\irrepsub{160}{v}\times\irrepsub{56}{s}$ & = & $\irrepsub{8}{c}+2(\irrepsub{56}{c})+2(\irrepsub{160}{c})+2(\irrepsub{224}{vc})+\irrepsub{224}{sc}+\irrepsub{672}{vs}+2(\irrepsub{840}{c})+\irrepsub{1296}{c}+\irrepsub{1400}{c}+\irrepsub{2800}{vc}$\\
$\irrepsub{160}{s}\times\irrepsub{112}{s}$ & = & $\irrep{28}+\irrepsub{35}{s}+\irrepsub{294}{s}+\irrep{300}+\irrep{350}+2(\irrepsub{567}{s})+\irrepsub{3675}{s}+\irrepsub{3696}{s}+\irrep{4096}+\irrepsub{4312}{s}$\\
$\irrepsub{160}{v}\times\irrepsub{112}{v}$ & = & $\irrep{28}+\irrepsub{35}{v}+\irrepsub{294}{v}+\irrep{300}+\irrep{350}+2(\irrepsub{567}{v})+\irrepsub{3675}{v}+\irrepsub{3696}{v}+\irrep{4096}+\irrepsub{4312}{v}$\\
$\irrepsub{160}{s}\times\irrepsub{160}{s}$ & = & $\irrep{1}+2(\irrep{28})+\irrepsub{35}{v}+\irrepsub{35}{c}+2(\irrepsub{35}{s})+\irrepsub{294}{s}+2(\irrep{300})+3(\irrep{350})+\irrepsub{567}{v}+\irrepsub{567}{c}+3(\irrepsub{567}{s})+\irrepsub[1]{840}{s}+\irrepsub[1]{840}{c}+\irrepsub[1]{840}{v}+\irrep{1925}+\irrepsub{3675}{s}+2(\irrep{4096})+\irrepsub{4312}{s}$\\
$\irrepsub{160}{v}\times\irrepsub{160}{v}$ & = & $\irrep{1}+2(\irrep{28})+2(\irrepsub{35}{v})+\irrepsub{35}{c}+\irrepsub{35}{s}+\irrepsub{294}{v}+2(\irrep{300})+3(\irrep{350})+3(\irrepsub{567}{v})+\irrepsub{567}{c}+\irrepsub{567}{s}+\irrepsub[1]{840}{s}+\irrepsub[1]{840}{c}+\irrepsub[1]{840}{v}+\irrep{1925}+\irrepsub{3675}{v}+2(\irrep{4096})+\irrepsub{4312}{v}$\\
\end{longtable}
\newpage
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:SO9TensorProducts}SO(9) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SO(9) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrep{9}\times\irrep{9}$ & = & $\irrep{1}+\irrep{36}+\irrep{44}$\\
$\irrep{16}\times\irrep{9}$ & = & $\irrep{16}+\irrep{128}$\\
$\irrep{16}\times\irrep{16}$ & = & $\irrep{1}+\irrep{9}+\irrep{36}+\irrep{84}+\irrep{126}$\\
$\irrep{36}\times\irrep{9}$ & = & $\irrep{9}+\irrep{84}+\irrep{231}$\\
$\irrep{36}\times\irrep{16}$ & = & $\irrep{16}+\irrep{128}+\irrep{432}$\\
$\irrep{36}\times\irrep{36}$ & = & $\irrep{1}+\irrep{36}+\irrep{44}+\irrep{126}+\irrep{495}+\irrep{594}$\\
$\irrep{44}\times\irrep{9}$ & = & $\irrep{9}+\irrep{156}+\irrep{231}$\\
$\irrep{44}\times\irrep{16}$ & = & $\irrep{128}+\irrep{576}$\\
$\irrep{44}\times\irrep{36}$ & = & $\irrep{36}+\irrep{44}+\irrep{594}+\irrep{910}$\\
$\irrep{44}\times\irrep{44}$ & = & $\irrep{1}+\irrep{36}+\irrep{44}+\irrep{450}+\irrep{495}+\irrep{910}$\\
$\irrep{84}\times\irrep{9}$ & = & $\irrep{36}+\irrep{126}+\irrep{594}$\\
$\irrep{84}\times\irrep{16}$ & = & $\irrep{16}+\irrep{128}+\irrep{432}+\irrep{768}$\\
$\irrep{84}\times\irrep{36}$ & = & $\irrep{9}+\irrep{84}+\irrep{126}+\irrep{231}+\irrep{924}+\irrep{1650}$\\
$\irrep{84}\times\irrep{44}$ & = & $\irrep{84}+\irrep{231}+\irrep{924}+\irrep{2457}$\\
$\irrep{84}\times\irrep{84}$ & = & $\irrep{1}+\irrep{36}+\irrep{44}+\irrep{84}+\irrep{126}+\irrep{495}+\irrep{594}+\irrep{924}+\irrep{1980}+\irrep{2772}$\\
$\irrep{126}\times\irrep{9}$ & = & $\irrep{84}+\irrep{126}+\irrep{924}$\\
$\irrep{126}\times\irrep{16}$ & = & $\irrep{16}+\irrep{128}+\irrep{432}+\irrep{672}+\irrep{768}$\\
$\irrep{126}\times\irrep{36}$ & = & $\irrep{36}+\irrep{84}+\irrep{126}+\irrep{594}+\irrep{924}+\irrep{2772}$\\
$\irrep{126}\times\irrep{44}$ & = & $\irrep{126}+\irrep{594}+\irrep{924}+\irrep{3900}$\\
$\irrep{126}\times\irrep{84}$ & = & $\irrep{9}+\irrep{36}+\irrep{84}+\irrep{126}+\irrep{231}+\irrep{594}+\irrep{924}+\irrep{1650}+\irrep{2772}+\irrep{4158}$\\
$\irrep{126}\times\irrep{126}$ & = & $\irrep{1}+\irrep{9}+\irrep{36}+\irrep{44}+\irrep{84}+\irrep{126}+\irrep{231}+\irrep{495}+\irrep{594}+\irrep{924}+\irrep{1650}+\irrep{1980}+\irrep{2772}+\irrep[1]{2772}+\irrep{4158}$\\
$\irrep{128}\times\irrep{9}$ & = & $\irrep{16}+\irrep{128}+\irrep{432}+\irrep{576}$\\
$\irrep{128}\times\irrep{16}$ & = & $\irrep{9}+\irrep{36}+\irrep{44}+\irrep{84}+\irrep{126}+\irrep{231}+\irrep{594}+\irrep{924}$\\
$\irrep{128}\times\irrep{36}$ & = & $\irrep{16}+2(\irrep{128})+\irrep{432}+\irrep{576}+\irrep{768}+\irrep{2560}$\\
$\irrep{128}\times\irrep{44}$ & = & $\irrep{16}+\irrep{128}+\irrep{432}+\irrep{576}+\irrep{1920}+\irrep{2560}$\\
$\irrep{128}\times\irrep{84}$ & = & $\irrep{16}+2(\irrep{128})+2(\irrep{432})+\irrep{576}+\irrep{672}+\irrep{768}+\irrep{2560}+\irrep{5040}$\\
$\irrep{128}\times\irrep{126}$ & = & $\irrep{16}+2(\irrep{128})+2(\irrep{432})+\irrep{576}+\irrep{672}+2(\irrep{768})+\irrep{2560}+\irrep{4608}+\irrep{5040}$\\
$\irrep{128}\times\irrep{128}$ & = & $\irrep{1}+\irrep{9}+2(\irrep{36})+\irrep{44}+2(\irrep{84})+2(\irrep{126})+\irrep{156}+2(\irrep{231})+\irrep{495}+2(\irrep{594})+\irrep{910}+2(\irrep{924})+\irrep{1650}+\irrep{2457}+\irrep{2772}+\irrep{3900}$\\
$\irrep{156}\times\irrep{9}$ & = & $\irrep{44}+\irrep{450}+\irrep{910}$\\
$\irrep{156}\times\irrep{16}$ & = & $\irrep{576}+\irrep{1920}$\\
$\irrep{156}\times\irrep{36}$ & = & $\irrep{156}+\irrep{231}+\irrep{2457}+\irrep[2]{2772}$\\
$\irrep{156}\times\irrep{44}$ & = & $\irrep{9}+\irrep{156}+\irrep{231}+\irrep{1122}+\irrep{2574}+\irrep[2]{2772}$\\
$\irrep{156}\times\irrep{84}$ & = & $\irrep{594}+\irrep{910}+\irrep{3900}+\irrep{7700}$\\
$\irrep{156}\times\irrep{126}$ & = & $\irrep{924}+\irrep{2457}+\irrep{3900}+\irrep{12375}$\\
$\irrep{156}\times\irrep{128}$ & = & $\irrep{128}+\irrep{576}+\irrep{1920}+\irrep{2560}+\irrep{5280}+\irrep{9504}$\\
$\irrep{156}\times\irrep{156}$ & = & $\irrep{1}+\irrep{36}+\irrep{44}+\irrep{450}+\irrep{495}+\irrep{910}+\irrep{2508}+\irrep{4004}+\irrep{7140}+\irrep{8748}$\\
$\irrep{231}\times\irrep{9}$ & = & $\irrep{36}+\irrep{44}+\irrep{495}+\irrep{594}+\irrep{910}$\\
$\irrep{231}\times\irrep{16}$ & = & $\irrep{128}+\irrep{432}+\irrep{576}+\irrep{2560}$\\
$\irrep{231}\times\irrep{36}$ & = & $\irrep{9}+\irrep{84}+\irrep{156}+2(\irrep{231})+\irrep{924}+\irrep{1650}+\irrep{2457}+\irrep{2574}$\\
$\irrep{231}\times\irrep{44}$ & = & $\irrep{9}+\irrep{84}+\irrep{156}+2(\irrep{231})+\irrep{1650}+\irrep{2457}+\irrep{2574}+\irrep[2]{2772}$\\
$\irrep{231}\times\irrep{84}$ & = & $\irrep{36}+\irrep{44}+\irrep{126}+\irrep{495}+2(\irrep{594})+\irrep{910}+\irrep{924}+\irrep{2772}+\irrep{3900}+\irrep{9009}$\\
$\irrep{231}\times\irrep{126}$ & = & $\irrep{84}+\irrep{126}+\irrep{231}+\irrep{594}+2(\irrep{924})+\irrep{1650}+\irrep{2457}+\irrep{2772}+\irrep{3900}+\irrep{15444}$\\
$\irrep{231}\times\irrep{128}$ & = & $\irrep{16}+2(\irrep{128})+2(\irrep{432})+2(\irrep{576})+\irrep{768}+\irrep{1920}+2(\irrep{2560})+\irrep{4928}+\irrep{5040}+\irrep{9504}$\\
$\irrep{231}\times\irrep{156}$ & = & $\irrep{36}+\irrep{44}+\irrep{450}+\irrep{495}+\irrep{594}+2(\irrep{910})+\irrep{7140}+\irrep{7700}+\irrep{8748}+\irrep{9009}$\\
$\irrep{231}\times\irrep{231}$ & = & $\irrep{1}+2(\irrep{36})+2(\irrep{44})+\irrep{126}+\irrep{450}+2(\irrep{495})+3(\irrep{594})+3(\irrep{910})+\irrep{1980}+\irrep{2772}+\irrep{3900}+\irrep{4004}+\irrep{7700}+\irrep{8748}+2(\irrep{9009})$\\
$\irrep{432}\times\irrep{9}$ & = & $\irrep{128}+\irrep{432}+\irrep{768}+\irrep{2560}$\\
$\irrep{432}\times\irrep{16}$ & = & $\irrep{36}+\irrep{84}+\irrep{126}+\irrep{231}+\irrep{495}+\irrep{594}+\irrep{924}+\irrep{1650}+\irrep{2772}$\\
$\irrep{432}\times\irrep{36}$ & = & $\irrep{16}+\irrep{128}+2(\irrep{432})+\irrep{576}+\irrep{672}+\irrep{768}+\irrep{2560}+\irrep{4928}+\irrep{5040}$\\
$\irrep{432}\times\irrep{44}$ & = & $\irrep{128}+\irrep{432}+\irrep{576}+\irrep{768}+\irrep{2560}+\irrep{5040}+\irrep{9504}$\\
$\irrep{432}\times\irrep{84}$ & = & $\irrep{16}+2(\irrep{128})+2(\irrep{432})+\irrep{576}+\irrep{672}+2(\irrep{768})+2(\irrep{2560})+\irrep{4608}+\irrep{4928}+\irrep{5040}+\irrep{12672}$\\
$\irrep{432}\times\irrep{126}$ & = & $\irrep{16}+2(\irrep{128})+3(\irrep{432})+\irrep{576}+\irrep{672}+2(\irrep{768})+2(\irrep{2560})+\irrep{4608}+\irrep{4928}+2(\irrep{5040})+\irrep{12672}+\irrep[1]{12672}$\\
$\irrep{432}\times\irrep{128}$ & = & $\irrep{9}+\irrep{36}+\irrep{44}+2(\irrep{84})+2(\irrep{126})+2(\irrep{231})+\irrep{495}+3(\irrep{594})+\irrep{910}+3(\irrep{924})+2(\irrep{1650})+\irrep{1980}+\irrep{2457}+\irrep{2574}+2(\irrep{2772})+\irrep{3900}+\irrep{4158}+\irrep{9009}+\irrep{15444}$\\
$\irrep{432}\times\irrep{156}$ & = & $\irrep{432}+\irrep{576}+\irrep{1920}+\irrep{2560}+\irrep{5040}+\irrep{9504}+\irrep{19712}+\irrep{27648}$\\
$\irrep{432}\times\irrep{231}$ & = & $\irrep{16}+2(\irrep{128})+2(\irrep{432})+2(\irrep{576})+\irrep{672}+2(\irrep{768})+\irrep{1920}+3(\irrep{2560})+\irrep{4608}+\irrep{4928}+2(\irrep{5040})+\irrep{9504}+\irrep{12672}+\irrep{19712}+\irrep{24192}$\\
$\irrep{432}\times\irrep{432}$ & = & $\irrep{1}+\irrep{9}+2(\irrep{36})+\irrep{44}+2(\irrep{84})+3(\irrep{126})+\irrep{156}+2(\irrep{231})+2(\irrep{495})+3(\irrep{594})+\irrep{910}+4(\irrep{924})+3(\irrep{1650})+\irrep{1980}+2(\irrep{2457})+\irrep{2574}+4(\irrep{2772})+\irrep[1]{2772}+2(\irrep{3900})+\irrep{4004}+2(\irrep{4158})+2(\irrep{9009})+\irrep{12012}+2(\irrep{15444})+\irrep[1]{15444}+\irrep{25740}+\irrep{27456}$\\
$\irrep{450}\times\irrep{9}$ & = & $\irrep{156}+\irrep{1122}+\irrep[2]{2772}$\\
$\irrep{450}\times\irrep{16}$ & = & $\irrep{1920}+\irrep{5280}$\\
$\irrep{450}\times\irrep{36}$ & = & $\irrep{450}+\irrep{910}+\irrep{7140}+\irrep{7700}$\\
$\irrep{450}\times\irrep{44}$ & = & $\irrep{44}+\irrep{450}+\irrep{910}+\irrep{2508}+\irrep{7140}+\irrep{8748}$\\
$\irrep{450}\times\irrep{84}$ & = & $\irrep{2457}+\irrep[2]{2772}+\irrep{12375}+\irrep{20196}$\\
$\irrep{450}\times\irrep{126}$ & = & $\irrep{3900}+\irrep{7700}+\irrep{12375}+\irrep{32725}$\\
$\irrep{450}\times\irrep{128}$ & = & $\irrep{576}+\irrep{1920}+\irrep{5280}+\irrep{9504}+\irrep[2]{12672}+\irrep{27648}$\\
$\irrep{450}\times\irrep{156}$ & = & $\irrep{9}+\irrep{156}+\irrep{231}+\irrep{1122}+\irrep{2574}+\irrep[2]{2772}+\irrep{5148}+\irrep{16302}+\irrep[1]{18018}+\irrep{23868}$\\
$\irrep{450}\times\irrep{231}$ & = & $\irrep{156}+\irrep{231}+\irrep{1122}+\irrep{2457}+\irrep{2574}+2(\irrep[2]{2772})+\irrep{16302}+\irrep{20196}+\irrep{23868}+\irrep{31500}$\\
$\irrep{450}\times\irrep{432}$ & = & $\irrep{1920}+\irrep{2560}+\irrep{5280}+\irrep{9504}+\irrep{19712}+\irrep{27648}+\irrep{59136}+\irrep{68640}$\\
$\irrep{450}\times\irrep{450}$ & = & $\irrep{1}+\irrep{36}+\irrep{44}+\irrep{450}+\irrep{495}+\irrep{910}+\irrep{2508}+\irrep{4004}+\irrep{7140}+\irrep{8748}+\irrep{9867}+\irrep{22932}+\irrep{33957}+\irrep{54978}+\irrep{56430}$\\
$\irrep{495}\times\irrep{9}$ & = & $\irrep{231}+\irrep{1650}+\irrep{2574}$\\
$\irrep{495}\times\irrep{16}$ & = & $\irrep{432}+\irrep{2560}+\irrep{4928}$\\
$\irrep{495}\times\irrep{36}$ & = & $\irrep{36}+\irrep{495}+\irrep{594}+\irrep{910}+\irrep{2772}+\irrep{4004}+\irrep{9009}$\\
$\irrep{495}\times\irrep{44}$ & = & $\irrep{44}+\irrep{495}+\irrep{594}+\irrep{910}+\irrep{1980}+\irrep{8748}+\irrep{9009}$\\
$\irrep{495}\times\irrep{84}$ & = & $\irrep{84}+\irrep{231}+\irrep{924}+\irrep{1650}+\irrep{2457}+\irrep{2574}+\irrep{2772}+\irrep{15444}+\irrep[1]{15444}$\\
$\irrep{495}\times\irrep{126}$ & = & $\irrep{126}+\irrep{495}+\irrep{594}+\irrep{924}+\irrep{1650}+\irrep{2772}+\irrep{3900}+\irrep{9009}+\irrep{15444}+\irrep{27456}$\\
$\irrep{495}\times\irrep{128}$ & = & $\irrep{128}+\irrep{432}+\irrep{576}+\irrep{768}+2(\irrep{2560})+\irrep{4928}+\irrep{5040}+\irrep{9504}+\irrep{12672}+\irrep{24192}$\\
$\irrep{495}\times\irrep{156}$ & = & $\irrep{156}+\irrep{231}+\irrep{1650}+\irrep{2457}+\irrep{2574}+\irrep[2]{2772}+\irrep{12012}+\irrep{23868}+\irrep{31500}$\\
$\irrep{495}\times\irrep{231}$ & = & $\irrep{9}+\irrep{84}+\irrep{156}+2(\irrep{231})+\irrep{924}+2(\irrep{1650})+2(\irrep{2457})+2(\irrep{2574})+\irrep[2]{2772}+\irrep{4158}+\irrep{12012}+\irrep{15444}+\irrep[1]{15444}+\irrep[1]{18018}+\irrep{31500}$\\
$\irrep{495}\times\irrep{432}$ & = & $\irrep{16}+\irrep{128}+2(\irrep{432})+\irrep{576}+\irrep{672}+\irrep{768}+\irrep{1920}+2(\irrep{2560})+\irrep{4608}+2(\irrep{4928})+2(\irrep{5040})+\irrep{9504}+\irrep{12672}+\irrep[1]{12672}+\irrep{19712}+\irrep{24192}+\irrep{34944}+\irrep{65536}$\\
$\irrep{495}\times\irrep{450}$ & = & $\irrep{450}+\irrep{495}+\irrep{910}+\irrep{7140}+\irrep{7700}+\irrep{8748}+\irrep{9009}+\irrep{44352}+\irrep{56430}+\irrep{87516}$\\
$\irrep{495}\times\irrep{495}$ & = & $\irrep{1}+\irrep{36}+\irrep{44}+\irrep{126}+\irrep{450}+2(\irrep{495})+\irrep{594}+\irrep{910}+\irrep{1980}+\irrep{2772}+\irrep[1]{2772}+\irrep{3900}+\irrep{4004}+\irrep{7700}+\irrep{8748}+2(\irrep{9009})+\irrep{22932}+\irrep{25740}+\irrep{27456}+\irrep{44352}+\irrep{71500}$\\
\end{longtable}
\newpage
{\setlength\extrarowheight{1.2pt}
\enlargethispage{10pt}
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:SO10TensorProducts}SO(10) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SO(10) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrep{10}\times\irrep{10}$ & = & $\irrep{1}+\irrep{45}+\irrep{54}$\\
$\irrep{16}\times\irrep{10}$ & = & $\irrepbar{16}+\irrepbar{144}$\\
$\irrep{16}\times\irrep{16}$ & = & $\irrep{10}+\irrep{120}+\irrepbar{126}$\\
$\irrepbar{16}\times\irrep{16}$ & = & $\irrep{1}+\irrep{45}+\irrep{210}$\\
$\irrep{45}\times\irrep{10}$ & = & $\irrep{10}+\irrep{120}+\irrep{320}$\\
$\irrep{45}\times\irrep{16}$ & = & $\irrep{16}+\irrep{144}+\irrep{560}$\\
$\irrep{45}\times\irrep{45}$ & = & $\irrep{1}+\irrep{45}+\irrep{54}+\irrep{210}+\irrep{770}+\irrep{945}$\\
$\irrep{54}\times\irrep{10}$ & = & $\irrep{10}+\irrep[1]{210}+\irrep{320}$\\
$\irrep{54}\times\irrep{16}$ & = & $\irrep{144}+\irrep{720}$\\
$\irrep{54}\times\irrep{45}$ & = & $\irrep{45}+\irrep{54}+\irrep{945}+\irrep{1386}$\\
$\irrep{54}\times\irrep{54}$ & = & $\irrep{1}+\irrep{45}+\irrep{54}+\irrep{660}+\irrep{770}+\irrep{1386}$\\
$\irrep{120}\times\irrep{10}$ & = & $\irrep{45}+\irrep{210}+\irrep{945}$\\
$\irrep{120}\times\irrep{16}$ & = & $\irrepbar{16}+\irrepbar{144}+\irrepbar{560}+\irrepbar{1200}$\\
$\irrep{120}\times\irrep{45}$ & = & $\irrep{10}+\irrep{120}+\irrep{126}+\irrepbar{126}+\irrep{320}+\irrep{1728}+\irrep{2970}$\\
$\irrep{120}\times\irrep{54}$ & = & $\irrep{120}+\irrep{320}+\irrep{1728}+\irrep{4312}$\\
$\irrep{120}\times\irrep{120}$ & = & $\irrep{1}+\irrep{45}+\irrep{54}+2(\irrep{210})+\irrep{770}+\irrep{945}+\irrep{1050}+\irrepbar{1050}+\irrep{4125}+\irrep{5940}$\\
$\irrep{126}\times\irrep{10}$ & = & $\irrep{210}+\irrep{1050}$\\
$\irrepbar{126}\times\irrep{16}$ & = & $\irrepbar{144}+\irrepbar{672}+\irrepbar{1200}$\\
$\irrep{126}\times\irrep{16}$ & = & $\irrepbar{16}+\irrepbar{560}+\irrepbar{1440}$\\
$\irrep{126}\times\irrep{45}$ & = & $\irrep{120}+\irrep{126}+\irrep{1728}+\irrep[1]{3696}$\\
$\irrep{126}\times\irrep{54}$ & = & $\irrepbar{126}+\irrep{1728}+\irrep{4950}$\\
$\irrep{126}\times\irrep{120}$ & = & $\irrep{45}+\irrep{210}+\irrep{945}+\irrep{1050}+\irrep{5940}+\irrep{6930}$\\
$\irrepbar{126}\times\irrep{126}$ & = & $\irrep{1}+\irrep{45}+\irrep{210}+\irrep{770}+\irrep{5940}+\irrep{8910}$\\
$\irrep{126}\times\irrep{126}$ & = & $\irrep{54}+\irrep{945}+\irrep{1050}+\irrep{2772}+\irrep{4125}+\irrep{6930}$\\
$\irrep{144}\times\irrep{10}$ & = & $\irrepbar{16}+\irrepbar{144}+\irrepbar{560}+\irrepbar{720}$\\
$\irrepbar{144}\times\irrep{16}$ & = & $\irrep{45}+\irrep{54}+\irrep{210}+\irrep{945}+\irrepbar{1050}$\\
$\irrep{144}\times\irrep{16}$ & = & $\irrep{10}+\irrep{120}+\irrep{126}+\irrep{320}+\irrep{1728}$\\
$\irrep{144}\times\irrep{45}$ & = & $\irrep{16}+2(\irrep{144})+\irrep{560}+\irrep{720}+\irrep{1200}+\irrep{3696}$\\
$\irrep{144}\times\irrep{54}$ & = & $\irrep{16}+\irrep{144}+\irrep{560}+\irrep{720}+\irrep{2640}+\irrep{3696}$\\
$\irrep{144}\times\irrep{120}$ & = & $\irrepbar{16}+2(\irrepbar{144})+2(\irrepbar{560})+\irrepbar{720}+\irrepbar{1200}+\irrepbar{1440}+\irrepbar{3696}+\irrepbar{8800}$\\
$\irrepbar{144}\times\irrep{126}$ & = & $\irrep{16}+\irrep{144}+\irrep{560}+\irrep{1200}+\irrep{1440}+\irrep{3696}+\irrep{11088}$\\
$\irrep{144}\times\irrep{126}$ & = & $\irrepbar{144}+\irrepbar{560}+\irrepbar{720}+\irrepbar{1200}+\irrepbar{1440}+\irrepbar{5280}+\irrepbar{8800}$\\
$\irrepbar{144}\times\irrep{144}$ & = & $\irrep{1}+2(\irrep{45})+\irrep{54}+2(\irrep{210})+\irrep{770}+2(\irrep{945})+\irrep{1050}+\irrepbar{1050}+\irrep{1386}+\irrep{5940}+\irrep{8085}$\\
$\irrep{144}\times\irrep{144}$ & = & $\irrep{10}+2(\irrep{120})+\irrep{126}+\irrepbar{126}+\irrep[1]{210}+2(\irrep{320})+2(\irrep{1728})+\irrep{2970}+\irrep[1]{3696}+\irrep{4312}+\irrep{4950}$\\
$\irrep{210}\times\irrep{10}$ & = & $\irrep{120}+\irrep{126}+\irrepbar{126}+\irrep{1728}$\\
$\irrep[1]{210}\times\irrep{10}$ & = & $\irrep{54}+\irrep{660}+\irrep{1386}$\\
$\irrep{210}\times\irrep{16}$ & = & $\irrep{16}+\irrep{144}+\irrep{560}+\irrep{1200}+\irrep{1440}$\\
$\irrep[1]{210}\times\irrep{16}$ & = & $\irrepbar{720}+\irrepbar{2640}$\\
$\irrep{210}\times\irrep{45}$ & = & $\irrep{45}+2(\irrep{210})+\irrep{945}+\irrep{1050}+\irrepbar{1050}+\irrep{5940}$\\
$\irrep[1]{210}\times\irrep{45}$ & = & $\irrep[1]{210}+\irrep{320}+\irrep{4312}+\irrep{4608}$\\
$\irrep{210}\times\irrep{54}$ & = & $\irrep{210}+\irrep{945}+\irrep{1050}+\irrepbar{1050}+\irrep{8085}$\\
$\irrep[1]{210}\times\irrep{54}$ & = & $\irrep{10}+\irrep[1]{210}+\irrep{320}+\irrep{1782}+\irrep{4410}+\irrep{4608}$\\
$\irrep{210}\times\irrep{120}$ & = & $\irrep{10}+2(\irrep{120})+\irrep{126}+\irrepbar{126}+\irrep{320}+2(\irrep{1728})+\irrep{2970}+\irrep[1]{3696}+\irrepbar[1]{3696}+\irrep{10560}$\\
$\irrep[1]{210}\times\irrep{120}$ & = & $\irrep{945}+\irrep{1386}+\irrep{8085}+\irrep{14784}$\\
$\irrep{210}\times\irrep{126}$ & = & $\irrep{10}+\irrep{120}+\irrep{126}+\irrep{320}+\irrep{1728}+\irrep{2970}+\irrep[1]{3696}+\irrep[1]{6930}+\irrep{10560}$\\
$\irrep[1]{210}\times\irrep{126}$ & = & $\irrepbar{1050}+\irrep{8085}+\irrep{17325}$\\
$\irrep{210}\times\irrep{144}$ & = & $\irrep{16}+2(\irrep{144})+2(\irrep{560})+\irrep{672}+\irrep{720}+2(\irrep{1200})+\irrep{1440}+\irrep{3696}+\irrep{8800}+\irrep{11088}$\\
$\irrep[1]{210}\times\irrep{144}$ & = & $\irrepbar{144}+\irrepbar{720}+\irrepbar{2640}+\irrepbar{3696}+\irrepbar{7920}+\irrepbar{15120}$\\
$\irrep{210}\times\irrep{210}$ & = & $\irrep{1}+2(\irrep{45})+\irrep{54}+2(\irrep{210})+\irrep{770}+2(\irrep{945})+\irrep{1050}+\irrepbar{1050}+\irrep{4125}+2(\irrep{5940})+\irrep{6930}+\irrepbar{6930}+\irrep{8910}$\\
$\irrep[1]{210}\times\irrep{210}$ & = & $\irrep{1728}+\irrep{4312}+\irrep{4950}+\irrepbar{4950}+\irrep{28160}$\\
$\irrep[1]{210}\times\irrep[1]{210}$ & = & $\irrep{1}+\irrep{45}+\irrep{54}+\irrep{660}+\irrep{770}+\irrep{1386}+\irrep{4290}+\irrep{7644}+\irrep{12870}+\irrep{16380}$\\
$\irrep{320}\times\irrep{10}$ & = & $\irrep{45}+\irrep{54}+\irrep{770}+\irrep{945}+\irrep{1386}$\\
$\irrep{320}\times\irrep{16}$ & = & $\irrepbar{144}+\irrepbar{560}+\irrepbar{720}+\irrepbar{3696}$\\
$\irrep{320}\times\irrep{45}$ & = & $\irrep{10}+\irrep{120}+\irrep[1]{210}+2(\irrep{320})+\irrep{1728}+\irrep{2970}+\irrep{4312}+\irrep{4410}$\\
$\irrep{320}\times\irrep{54}$ & = & $\irrep{10}+\irrep{120}+\irrep[1]{210}+2(\irrep{320})+\irrep{2970}+\irrep{4312}+\irrep{4410}+\irrep{4608}$\\
$\irrep{320}\times\irrep{120}$ & = & $\irrep{45}+\irrep{54}+\irrep{210}+\irrep{770}+2(\irrep{945})+\irrep{1050}+\irrepbar{1050}+\irrep{1386}+\irrep{5940}+\irrep{8085}+\irrep{17920}$\\
$\irrep{320}\times\irrep{126}$ & = & $\irrep{210}+\irrep{945}+\irrep{1050}+\irrepbar{1050}+\irrep{5940}+\irrep{8085}+\irrep{23040}$\\
$\irrep{320}\times\irrep{144}$ & = & $\irrepbar{16}+2(\irrepbar{144})+2(\irrepbar{560})+2(\irrepbar{720})+\irrepbar{1200}+\irrepbar{2640}+2(\irrepbar{3696})+\irrepbar{8064}+\irrepbar{8800}+\irrepbar{15120}$\\
$\irrep{320}\times\irrep{210}$ & = & $\irrep{120}+\irrep{126}+\irrepbar{126}+\irrep{320}+3(\irrep{1728})+\irrep{2970}+\irrep[1]{3696}+\irrepbar[1]{3696}+\irrep{4312}+\irrep{4950}+\irrepbar{4950}+\irrep{36750}$\\
$\irrep{320}\times\irrep[1]{210}$ & = & $\irrep{45}+\irrep{54}+\irrep{660}+\irrep{770}+\irrep{945}+2(\irrep{1386})+\irrep{12870}+\irrep{14784}+\irrep{16380}+\irrep{17920}$\\
$\irrep{320}\times\irrep{320}$ & = & $\irrep{1}+2(\irrep{45})+2(\irrep{54})+\irrep{210}+\irrep{660}+2(\irrep{770})+3(\irrep{945})+3(\irrep{1386})+\irrep{4125}+\irrep{5940}+\irrep{7644}+\irrep{8085}+\irrep{14784}+\irrep{16380}+2(\irrep{17920})$\\
$\irrep{560}\times\irrep{10}$ & = & $\irrepbar{144}+\irrepbar{560}+\irrepbar{1200}+\irrepbar{3696}$\\
$\irrep{560}\times\irrep{16}$ & = & $\irrep{120}+\irrepbar{126}+\irrep{320}+\irrep{1728}+\irrep{2970}+\irrepbar[1]{3696}$\\
$\irrepbar{560}\times\irrep{16}$ & = & $\irrep{45}+\irrep{210}+\irrep{770}+\irrep{945}+\irrep{1050}+\irrep{5940}$\\
$\irrep{560}\times\irrep{45}$ & = & $\irrep{16}+\irrep{144}+2(\irrep{560})+\irrep{720}+\irrep{1200}+\irrep{1440}+\irrep{3696}+\irrep{8064}+\irrep{8800}$\\
$\irrep{560}\times\irrep{54}$ & = & $\irrep{144}+\irrep{560}+\irrep{720}+\irrep{1200}+\irrep{3696}+\irrep{8800}+\irrep{15120}$\\
$\irrep{560}\times\irrep{120}$ & = & $\irrepbar{16}+2(\irrepbar{144})+2(\irrepbar{560})+\irrepbar{672}+\irrepbar{720}+2(\irrepbar{1200})+\irrepbar{1440}+2(\irrepbar{3696})+\irrepbar{8064}+\irrepbar{8800}+\irrepbar{11088}+\irrepbar{25200}$\\
$\irrep{560}\times\irrep{126}$ & = & $\irrepbar{16}+\irrepbar{144}+2(\irrepbar{560})+\irrepbar{1200}+\irrepbar{1440}+\irrepbar{3696}+\irrepbar{8064}+\irrepbar{8800}+\irrepbar{11088}+\irrepbar{34992}$\\
$\irrepbar{560}\times\irrep{126}$ & = & $\irrep{144}+\irrep{560}+\irrep{672}+\irrep{720}+2(\irrep{1200})+\irrep{3696}+\irrep{8800}+\irrep{11088}+\irrep{17280}+\irrep{25200}$\\
$\irrep{560}\times\irrep{144}$ & = & $\irrep{10}+2(\irrep{120})+\irrep{126}+\irrepbar{126}+2(\irrep{320})+3(\irrep{1728})+2(\irrep{2970})+\irrep[1]{3696}+\irrepbar[1]{3696}+\irrep{4312}+\irrep{4410}+\irrepbar{4950}+\irrep{10560}+\irrep{36750}$\\
$\irrepbar{560}\times\irrep{144}$ & = & $\irrep{45}+\irrep{54}+2(\irrep{210})+\irrep{770}+3(\irrep{945})+2(\irrep{1050})+\irrepbar{1050}+\irrep{1386}+\irrep{4125}+2(\irrep{5940})+\irrep{6930}+\irrep{8085}+\irrep{17920}+\irrep{23040}$\\
$\irrep{560}\times\irrep{210}$ & = & $\irrep{16}+2(\irrep{144})+3(\irrep{560})+\irrep{720}+2(\irrep{1200})+2(\irrep{1440})+2(\irrep{3696})+\irrep{5280}+\irrep{8064}+2(\irrep{8800})+\irrep{11088}+\irrep{25200}+\irrep{34992}$\\
$\irrep{560}\times\irrep[1]{210}$ & = & $\irrepbar{560}+\irrepbar{720}+\irrepbar{2640}+\irrepbar{3696}+\irrepbar{8800}+\irrepbar{15120}+\irrepbar{38016}+\irrepbar{48048}$\\
$\irrep{560}\times\irrep{320}$ & = & $\irrepbar{16}+2(\irrepbar{144})+2(\irrepbar{560})+2(\irrepbar{720})+2(\irrepbar{1200})+\irrepbar{1440}+\irrepbar{2640}+3(\irrepbar{3696})+\irrepbar{8064}+2(\irrepbar{8800})+\irrepbar{11088}+\irrepbar{15120}+\irrepbar{25200}+\irrepbar{38016}+\irrepbar{43680}$\\
$\irrep{560}\times\irrep{560}$ & = & $\irrep{10}+2(\irrep{120})+\irrep{126}+2(\irrepbar{126})+\irrep[1]{210}+2(\irrep{320})+4(\irrep{1728})+3(\irrep{2970})+\irrep[1]{3696}+3(\irrepbar[1]{3696})+2(\irrep{4312})+\irrep{4410}+\irrep{4950}+\irrepbar{4950}+\irrepbar[1]{6930}+2(\irrep{10560})+\irrep{27720}+\irrep{34398}+2(\irrep{36750})+\irrepbar{46800}+\irrepbar{48114}$\\
$\irrepbar{560}\times\irrep{560}$ & = & $\irrep{1}+2(\irrep{45})+\irrep{54}+3(\irrep{210})+2(\irrep{770})+3(\irrep{945})+2(\irrep{1050})+2(\irrepbar{1050})+\irrep{1386}+\irrep{4125}+4(\irrep{5940})+\irrep{6930}+\irrepbar{6930}+\irrep{7644}+2(\irrep{8085})+\irrep{8910}+2(\irrep{17920})+\irrep{23040}+\irrepbar{23040}+\irrep{72765}+\irrep{73710}$\\
$\irrep{660}\times\irrep{10}$ & = & $\irrep[1]{210}+\irrep{1782}+\irrep{4608}$\\
$\irrep{660}\times\irrep{16}$ & = & $\irrep{2640}+\irrep{7920}$\\
$\irrep{660}\times\irrep{45}$ & = & $\irrep{660}+\irrep{1386}+\irrep{12870}+\irrep{14784}$\\
$\irrep{660}\times\irrep{54}$ & = & $\irrep{54}+\irrep{660}+\irrep{1386}+\irrep{4290}+\irrep{12870}+\irrep{16380}$\\
$\irrep{660}\times\irrep{120}$ & = & $\irrep{4312}+\irrep{4608}+\irrep{28160}+\irrep{42120}$\\
$\irrep{660}\times\irrep{126}$ & = & $\irrepbar{4950}+\irrep{28160}+\irrep{50050}$\\
$\irrep{660}\times\irrep{144}$ & = & $\irrep{720}+\irrep{2640}+\irrep{7920}+\irrep{15120}+\irrep{20592}+\irrep{48048}$\\
$\irrep{660}\times\irrep[1]{210}$ & = & $\irrep{10}+\irrep[1]{210}+\irrep{320}+\irrep{1782}+\irrep{4410}+\irrep{4608}+\irrep{9438}+\irrep{31680}+\irrep{37632}+\irrep{48510}$\\
$\irrep{660}\times\irrep{320}$ & = & $\irrep[1]{210}+\irrep{320}+\irrep{1782}+\irrep{4312}+\irrep{4410}+2(\irrep{4608})+\irrep{31680}+\irrep{42120}+\irrep{48510}+\irrep{68640}$\\
\end{longtable}
}
\newpage
\enlargethispage{20pt}
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:SO11TensorProducts}SO(11) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SO(11) Tensor Products (continued)}\\
\toprule
\endhead
\midrule
\endfoot
\bottomrule
\endlastfoot
$\irrep{11}\times\irrep{11}$ & = & $\irrep{1}+\irrep{55}+\irrep{65}$\\
$\irrep{32}\times\irrep{11}$ & = & $\irrep{32}+\irrep{320}$\\
$\irrep{32}\times\irrep{32}$ & = & $\irrep{1}+\irrep{11}+\irrep{55}+\irrep{165}+\irrep{330}+\irrep{462}$\\
$\irrep{55}\times\irrep{11}$ & = & $\irrep{11}+\irrep{165}+\irrep{429}$\\
$\irrep{55}\times\irrep{32}$ & = & $\irrep{32}+\irrep{320}+\irrep{1408}$\\
$\irrep{55}\times\irrep{55}$ & = & $\irrep{1}+\irrep{55}+\irrep{65}+\irrep{330}+\irrep{1144}+\irrep{1430}$\\
$\irrep{65}\times\irrep{11}$ & = & $\irrep{11}+\irrep{275}+\irrep{429}$\\
$\irrep{65}\times\irrep{32}$ & = & $\irrep{320}+\irrep{1760}$\\
$\irrep{65}\times\irrep{55}$ & = & $\irrep{55}+\irrep{65}+\irrep{1430}+\irrep{2025}$\\
$\irrep{65}\times\irrep{65}$ & = & $\irrep{1}+\irrep{55}+\irrep{65}+\irrep{935}+\irrep{1144}+\irrep{2025}$\\
$\irrep{165}\times\irrep{11}$ & = & $\irrep{55}+\irrep{330}+\irrep{1430}$\\
$\irrep{165}\times\irrep{32}$ & = & $\irrep{32}+\irrep{320}+\irrep{1408}+\irrep{3520}$\\
$\irrep{165}\times\irrep{55}$ & = & $\irrep{11}+\irrep{165}+\irrep{429}+\irrep{462}+\irrep{3003}+\irrep{5005}$\\
$\irrep{165}\times\irrep{65}$ & = & $\irrep{165}+\irrep{429}+\irrep{3003}+\irrep{7128}$\\
$\irrep{165}\times\irrep{165}$ & = & $\irrep{1}+\irrep{55}+\irrep{65}+\irrep{330}+\irrep{462}+\irrep{1144}+\irrep{1430}+\irrep{4290}+\irrep{7865}+\irrep{11583}$\\
$\irrep{275}\times\irrep{11}$ & = & $\irrep{65}+\irrep{935}+\irrep{2025}$\\
$\irrep{275}\times\irrep{32}$ & = & $\irrep{1760}+\irrep{7040}$\\
$\irrep{275}\times\irrep{55}$ & = & $\irrep{275}+\irrep{429}+\irrep{7128}+\irrep{7293}$\\
$\irrep{275}\times\irrep{65}$ & = & $\irrep{11}+\irrep{275}+\irrep{429}+\irrep{2717}+\irrep{7150}+\irrep{7293}$\\
$\irrep{275}\times\irrep{165}$ & = & $\irrep{1430}+\irrep{2025}+\irrep{15400}+\irrep{26520}$\\
$\irrep{275}\times\irrep{275}$ & = & $\irrep{1}+\irrep{55}+\irrep{65}+\irrep{935}+\irrep{1144}+\irrep{2025}+\irrep{7007}+\irrep{13650}+\irrep{21945}+\irrep{28798}$\\
$\irrep{320}\times\irrep{11}$ & = & $\irrep{32}+\irrep{320}+\irrep{1408}+\irrep{1760}$\\
$\irrep{320}\times\irrep{32}$ & = & $\irrep{11}+\irrep{55}+\irrep{65}+\irrep{165}+\irrep{330}+\irrep{429}+\irrep{462}+\irrep{1430}+\irrep{3003}+\irrep{4290}$\\
$\irrep{320}\times\irrep{55}$ & = & $\irrep{32}+2(\irrep{320})+\irrep{1408}+\irrep{1760}+\irrep{3520}+\irrep{10240}$\\
$\irrep{320}\times\irrep{65}$ & = & $\irrep{32}+\irrep{320}+\irrep{1408}+\irrep{1760}+\irrep{7040}+\irrep{10240}$\\
$\irrep{320}\times\irrep{165}$ & = & $\irrep{32}+2(\irrep{320})+2(\irrep{1408})+\irrep{1760}+\irrep{3520}+\irrep{5280}+\irrep{10240}+\irrep{28512}$\\
$\irrep{320}\times\irrep{320}$ & = & $\irrep{1}+\irrep{11}+2(\irrep{55})+\irrep{65}+2(\irrep{165})+\irrep{275}+2(\irrep{330})+2(\irrep{429})+2(\irrep{462})+\irrep{1144}+2(\irrep{1430})+\irrep{2025}+2(\irrep{3003})+2(\irrep{4290})+\irrep{5005}+\irrep{7128}+\irrep{11583}+\irrep{15400}+\irrep{17160}+\irrep{22275}$\\
$\irrep{330}\times\irrep{11}$ & = & $\irrep{165}+\irrep{462}+\irrep{3003}$\\
$\irrep{330}\times\irrep{32}$ & = & $\irrep{32}+\irrep{320}+\irrep{1408}+\irrep{3520}+\irrep{5280}$\\
$\irrep{330}\times\irrep{55}$ & = & $\irrep{55}+\irrep{330}+\irrep{462}+\irrep{1430}+\irrep{4290}+\irrep{11583}$\\
$\irrep{330}\times\irrep{65}$ & = & $\irrep{330}+\irrep{1430}+\irrep{4290}+\irrep{15400}$\\
$\irrep{330}\times\irrep{165}$ & = & $\irrep{11}+\irrep{165}+\irrep{330}+\irrep{429}+\irrep{462}+\irrep{3003}+\irrep{4290}+\irrep{5005}+\irrep{17160}+\irrep{23595}$\\
$\irrep{330}\times\irrep{330}$ & = & $\irrep{1}+\irrep{55}+\irrep{65}+\irrep{165}+\irrep{330}+\irrep{462}+\irrep{1144}+\irrep{1430}+\irrep{3003}+\irrep{4290}+\irrep{7865}+\irrep{11583}+\irrep{17160}+\irrep[1]{23595}+\irrep{37752}$\\
$\irrep{429}\times\irrep{11}$ & = & $\irrep{55}+\irrep{65}+\irrep{1144}+\irrep{1430}+\irrep{2025}$\\
$\irrep{429}\times\irrep{32}$ & = & $\irrep{320}+\irrep{1408}+\irrep{1760}+\irrep{10240}$\\
$\irrep{429}\times\irrep{55}$ & = & $\irrep{11}+\irrep{165}+\irrep{275}+2(\irrep{429})+\irrep{3003}+\irrep{5005}+\irrep{7128}+\irrep{7150}$\\
$\irrep{429}\times\irrep{65}$ & = & $\irrep{11}+\irrep{165}+\irrep{275}+2(\irrep{429})+\irrep{5005}+\irrep{7128}+\irrep{7150}+\irrep{7293}$\\
$\irrep{429}\times\irrep{165}$ & = & $\irrep{55}+\irrep{65}+\irrep{330}+\irrep{1144}+2(\irrep{1430})+\irrep{2025}+\irrep{4290}+\irrep{11583}+\irrep{15400}+\irrep{33033}$\\
$\irrep{429}\times\irrep{275}$ & = & $\irrep{55}+\irrep{65}+\irrep{935}+\irrep{1144}+\irrep{1430}+2(\irrep{2025})+\irrep{21945}+\irrep{26520}+\irrep{28798}+\irrep{33033}$\\
$\irrep{429}\times\irrep{429}$ & = & $\irrep{1}+2(\irrep{55})+2(\irrep{65})+\irrep{330}+\irrep{935}+2(\irrep{1144})+3(\irrep{1430})+3(\irrep{2025})+\irrep{7865}+\irrep{11583}+\irrep{13650}+\irrep{15400}+\irrep{26520}+\irrep{28798}+2(\irrep{33033})$\\
$\irrep{462}\times\irrep{11}$ & = & $\irrep{330}+\irrep{462}+\irrep{4290}$\\
$\irrep{462}\times\irrep{32}$ & = & $\irrep{32}+\irrep{320}+\irrep{1408}+\irrep{3520}+\irrep{4224}+\irrep{5280}$\\
$\irrep{462}\times\irrep{55}$ & = & $\irrep{165}+\irrep{330}+\irrep{462}+\irrep{3003}+\irrep{4290}+\irrep{17160}$\\
$\irrep{462}\times\irrep{65}$ & = & $\irrep{462}+\irrep{3003}+\irrep{4290}+\irrep{22275}$\\
$\irrep{462}\times\irrep{165}$ & = & $\irrep{55}+\irrep{165}+\irrep{330}+\irrep{462}+\irrep{1430}+\irrep{3003}+\irrep{4290}+\irrep{11583}+\irrep{17160}+\irrep{37752}$\\
\end{longtable}
\newpage
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:SO12TensorProducts}SO(12) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SO(12) Tensor Products (continued)}\\
\toprule
\endhead
\midrule
\endfoot
\bottomrule
\endlastfoot
$\irrep{12}\times\irrep{12}$ & = & $\irrep{1}+\irrep{66}+\irrep{77}$\\
$\irrep{32}\times\irrep{12}$ & = & $\irrepbar{32}+\irrepbar{352}$\\
$\irrepbar{32}\times\irrep{32}$ & = & $\irrep{12}+\irrep{220}+\irrep{792}$\\
$\irrep{32}\times\irrep{32}$ & = & $\irrep{1}+\irrep{66}+\irrep{462}+\irrep{495}$\\
$\irrep{66}\times\irrep{12}$ & = & $\irrep{12}+\irrep{220}+\irrep{560}$\\
$\irrep{66}\times\irrep{32}$ & = & $\irrep{32}+\irrep{352}+\irrep{1728}$\\
$\irrep{66}\times\irrep{66}$ & = & $\irrep{1}+\irrep{66}+\irrep{77}+\irrep{495}+\irrep{1638}+\irrep{2079}$\\
$\irrep{77}\times\irrep{12}$ & = & $\irrep{12}+\irrep[1]{352}+\irrep{560}$\\
$\irrep{77}\times\irrep{32}$ & = & $\irrep{352}+\irrep{2112}$\\
$\irrep{77}\times\irrep{66}$ & = & $\irrep{66}+\irrep{77}+\irrep{2079}+\irrep{2860}$\\
$\irrep{77}\times\irrep{77}$ & = & $\irrep{1}+\irrep{66}+\irrep{77}+\irrep{1287}+\irrep{1638}+\irrep{2860}$\\
$\irrep{220}\times\irrep{12}$ & = & $\irrep{66}+\irrep{495}+\irrep{2079}$\\
$\irrep{220}\times\irrep{32}$ & = & $\irrepbar{32}+\irrepbar{352}+\irrepbar{1728}+\irrepbar[1]{4928}$\\
$\irrep{220}\times\irrep{66}$ & = & $\irrep{12}+\irrep{220}+\irrep{560}+\irrep{792}+\irrep{4928}+\irrep{8008}$\\
$\irrep{220}\times\irrep{77}$ & = & $\irrep{220}+\irrep{560}+\irrep{4928}+\irrep{11232}$\\
$\irrep{220}\times\irrep{220}$ & = & $\irrep{1}+\irrep{66}+\irrep{77}+\irrep{462}+\irrepbar{462}+\irrep{495}+\irrep{1638}+\irrep{2079}+\irrep{8085}+\irrep{14014}+\irrep{21021}$\\
$\irrep{352}\times\irrep{12}$ & = & $\irrepbar{32}+\irrepbar{352}+\irrepbar{1728}+\irrepbar{2112}$\\
$\irrep[1]{352}\times\irrep{12}$ & = & $\irrep{77}+\irrep{1287}+\irrep{2860}$\\
$\irrep{352}\times\irrep{32}$ & = & $\irrep{66}+\irrep{77}+\irrepbar{462}+\irrep{495}+\irrep{2079}+\irrep{8085}$\\
$\irrepbar{352}\times\irrep{32}$ & = & $\irrep{12}+\irrep{220}+\irrep{560}+\irrep{792}+\irrep{4752}+\irrep{4928}$\\
$\irrep[1]{352}\times\irrep{32}$ & = & $\irrepbar{2112}+\irrepbar{9152}$\\
$\irrep{352}\times\irrep{66}$ & = & $\irrep{32}+2(\irrep{352})+\irrep{1728}+\irrep{2112}+\irrep[1]{4928}+\irrep{13728}$\\
$\irrep[1]{352}\times\irrep{66}$ & = & $\irrep[1]{352}+\irrep{560}+\irrep[1]{11088}+\irrep{11232}$\\
$\irrep{352}\times\irrep{77}$ & = & $\irrep{32}+\irrep{352}+\irrep{1728}+\irrep{2112}+\irrep{9152}+\irrep{13728}$\\
$\irrep[1]{352}\times\irrep{77}$ & = & $\irrep{12}+\irrep[1]{352}+\irrep{560}+\irrep{4004}+\irrep{11088}+\irrep[1]{11088}$\\
$\irrep{352}\times\irrep{220}$ & = & $\irrepbar{32}+2(\irrepbar{352})+2(\irrepbar{1728})+\irrepbar{2112}+\irrepbar[1]{4928}+\irrepbar{8800}+\irrepbar{13728}+\irrepbar{43680}$\\
$\irrep[1]{352}\times\irrep{220}$ & = & $\irrep{2079}+\irrep{2860}+\irrep{27456}+\irrep{45045}$\\
$\irrep{352}\times\irrep{352}$ & = & $\irrep{1}+2(\irrep{66})+\irrep{77}+\irrep{462}+\irrepbar{462}+2(\irrep{495})+\irrep{1638}+2(\irrep{2079})+\irrep{2860}+2(\irrep{8085})+\irrep{21021}+\irrepbar{21450}+\irrepbar{27027}+\irrep{27456}$\\
$\irrepbar{352}\times\irrep{352}$ & = & $\irrep{12}+2(\irrep{220})+\irrep[1]{352}+2(\irrep{560})+2(\irrep{792})+\irrep{4752}+\irrepbar{4752}+2(\irrep{4928})+\irrep{8008}+\irrep{11232}+\irrep{36036}+\irrep{45760}$\\
$\irrep{462}\times\irrep{12}$ & = & $\irrep{792}+\irrep{4752}$\\
$\irrepbar{462}\times\irrep{32}$ & = & $\irrep{352}+\irrep[1]{4928}+\irrep{9504}$\\
$\irrep{462}\times\irrep{32}$ & = & $\irrep{32}+\irrep{1728}+\irrep{4224}+\irrep{8800}$\\
$\irrep{462}\times\irrep{66}$ & = & $\irrep{462}+\irrep{495}+\irrep{8085}+\irrep{21450}$\\
$\irrep{462}\times\irrep{77}$ & = & $\irrepbar{462}+\irrep{8085}+\irrep{27027}$\\
$\irrep{495}\times\irrep{12}$ & = & $\irrep{220}+\irrep{792}+\irrep{4928}$\\
$\irrep{495}\times\irrep{32}$ & = & $\irrep{32}+\irrep{352}+\irrep{1728}+\irrep[1]{4928}+\irrep{8800}$\\
$\irrep{495}\times\irrep{66}$ & = & $\irrep{66}+\irrep{462}+\irrepbar{462}+\irrep{495}+\irrep{2079}+\irrep{8085}+\irrep{21021}$\\
$\irrep{495}\times\irrep{77}$ & = & $\irrep{495}+\irrep{2079}+\irrep{8085}+\irrep{27456}$\\
$\irrep{495}\times\irrep{220}$ & = & $\irrep{12}+\irrep{220}+\irrep{560}+2(\irrep{792})+\irrep{4752}+\irrepbar{4752}+\irrep{4928}+\irrep{8008}+\irrep{36036}+\irrep{48048}$\\
$\irrep{560}\times\irrep{12}$ & = & $\irrep{66}+\irrep{77}+\irrep{1638}+\irrep{2079}+\irrep{2860}$\\
$\irrep{560}\times\irrep{32}$ & = & $\irrepbar{352}+\irrepbar{1728}+\irrepbar{2112}+\irrepbar{13728}$\\
$\irrep{560}\times\irrep{66}$ & = & $\irrep{12}+\irrep{220}+\irrep[1]{352}+2(\irrep{560})+\irrep{4928}+\irrep{8008}+\irrep{11088}+\irrep{11232}$\\
$\irrep{560}\times\irrep{77}$ & = & $\irrep{12}+\irrep{220}+\irrep[1]{352}+2(\irrep{560})+\irrep{8008}+\irrep{11088}+\irrep[1]{11088}+\irrep{11232}$\\
\end{longtable}
\newpage
\begin{longtable}{rcl}
\caption{\label{tab:SO13TensorProducts}SO(13) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SO(13) Tensor Products (continued)}\\
\toprule
\endhead
\midrule
\endfoot
\bottomrule
\endlastfoot
$\irrep{13}\times\irrep{13}$ & = & $\irrep{1}+\irrep{78}+\irrep{90}$\\
$\irrep{64}\times\irrep{13}$ & = & $\irrep{64}+\irrep{768}$\\
$\irrep{64}\times\irrep{64}$ & = & $\irrep{1}+\irrep{13}+\irrep{78}+\irrep{286}+\irrepbar{715}+\irrep{1287}+\irrep{1716}$\\
$\irrep{78}\times\irrep{13}$ & = & $\irrep{13}+\irrep{286}+\irrep{715}$\\
$\irrep{78}\times\irrep{64}$ & = & $\irrep{64}+\irrep{768}+\irrep{4160}$\\
$\irrep{78}\times\irrep{78}$ & = & $\irrep{1}+\irrep{78}+\irrep{90}+\irrepbar{715}+\irrep{2275}+\irrep{2925}$\\
$\irrep{90}\times\irrep{13}$ & = & $\irrep{13}+\irrep{442}+\irrep{715}$\\
$\irrep{90}\times\irrep{64}$ & = & $\irrep{768}+\irrep{4992}$\\
$\irrep{90}\times\irrep{78}$ & = & $\irrep{78}+\irrep{90}+\irrep{2925}+\irrep{3927}$\\
$\irrep{90}\times\irrep{90}$ & = & $\irrep{1}+\irrep{78}+\irrep{90}+\irrep{1729}+\irrep{2275}+\irrep{3927}$\\
$\irrep{286}\times\irrep{13}$ & = & $\irrep{78}+\irrepbar{715}+\irrep{2925}$\\
$\irrep{286}\times\irrep{64}$ & = & $\irrep{64}+\irrep{768}+\irrep{4160}+\irrep{13312}$\\
$\irrep{286}\times\irrep{78}$ & = & $\irrep{13}+\irrep{286}+\irrep{715}+\irrep{1287}+\irrep{7722}+\irrep{12285}$\\
$\irrep{286}\times\irrep{90}$ & = & $\irrep{286}+\irrep{715}+\irrep{7722}+\irrep{17017}$\\
$\irrep{442}\times\irrep{13}$ & = & $\irrep{90}+\irrep{1729}+\irrep{3927}$\\
$\irrep{442}\times\irrep{78}$ & = & $\irrep{442}+\irrep{715}+\irrep{16302}+\irrep{17017}$\\
$\irrep{442}\times\irrep{90}$ & = & $\irrep{13}+\irrep{442}+\irrep{715}+\irrep{5733}+\irrep{16302}+\irrep{16575}$\\
$\irrep{715}\times\irrep{13}$ & = & $\irrep{78}+\irrep{90}+\irrep{2275}+\irrep{2925}+\irrep{3927}$\\
$\irrep{715}\times\irrep{78}$ & = & $\irrep{13}+\irrep{286}+\irrep{442}+2(\irrep{715})+\irrep{7722}+\irrep{12285}+\irrep{16575}+\irrep{17017}$\\
$\irrep{715}\times\irrep{90}$ & = & $\irrep{13}+\irrep{286}+\irrep{442}+2(\irrep{715})+\irrep{12285}+\irrep{16302}+\irrep{16575}+\irrep{17017}$\\
$\irrep{768}\times\irrep{13}$ & = & $\irrep{64}+\irrep{768}+\irrep{4160}+\irrep{4992}$\\
$\irrep{1287}\times\irrep{13}$ & = & $\irrepbar{715}+\irrep{1716}+\irrep{14300}$\\
$\irrep{1729}\times\irrep{13}$ & = & $\irrep{442}+\irrep{5733}+\irrep{16302}$\\
$\irrep{2275}\times\irrep{13}$ & = & $\irrep{715}+\irrep{12285}+\irrep{16575}$\\
$\irrep{2925}\times\irrep{13}$ & = & $\irrep{286}+\irrep{715}+\irrep{7722}+\irrep{12285}+\irrep{17017}$\\
\end{longtable}
\begin{longtable}{rcl}
\caption{\label{tab:SO14TensorProducts}SO(14) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SO(14) Tensor Products (continued)}\\
\toprule
\endhead
\midrule
\endfoot
\bottomrule
\endlastfoot
$\irrep{14}\times\irrep{14}$ & = & $\irrep{1}+\irrep{91}+\irrep{104}$\\
$\irrep{64}\times\irrep{14}$ & = & $\irrepbar{64}+\irrepbar{832}$\\
$\irrepbar{64}\times\irrep{64}$ & = & $\irrep{1}+\irrep{91}+\irrep{1001}+\irrep{3003}$\\
$\irrep{64}\times\irrep{64}$ & = & $\irrep{14}+\irrep{364}+\irrep{1716}+\irrep{2002}$\\
$\irrep{91}\times\irrep{14}$ & = & $\irrep{14}+\irrep{364}+\irrep{896}$\\
$\irrep{91}\times\irrep{64}$ & = & $\irrep{64}+\irrep{832}+\irrep{4928}$\\
$\irrep{91}\times\irrep{91}$ & = & $\irrep{1}+\irrep{91}+\irrep{104}+\irrep{1001}+\irrep{3080}+\irrep{4004}$\\
$\irrep{104}\times\irrep{14}$ & = & $\irrep{14}+\irrep{546}+\irrep{896}$\\
$\irrep{104}\times\irrep{64}$ & = & $\irrep{832}+\irrep{5824}$\\
$\irrep{104}\times\irrep{91}$ & = & $\irrep{91}+\irrep{104}+\irrep{4004}+\irrep{5265}$\\
$\irrep{104}\times\irrep{104}$ & = & $\irrep{1}+\irrep{91}+\irrep{104}+\irrep{2275}+\irrep{3080}+\irrep{5265}$\\
$\irrep{364}\times\irrep{14}$ & = & $\irrep{91}+\irrep{1001}+\irrep{4004}$\\
$\irrep{546}\times\irrep{14}$ & = & $\irrep{104}+\irrep{2275}+\irrep{5265}$\\
$\irrep{832}\times\irrep{14}$ & = & $\irrepbar{64}+\irrepbar{832}+\irrepbar{4928}+\irrepbar{5824}$\\
\end{longtable}
\newpage
\begin{longtable}{rcl}
\caption{\label{tab:SO18TensorProducts}SO(18) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SO(18) Tensor Products (continued)}\\
\toprule
\endhead
\midrule
\endfoot
\bottomrule
\endlastfoot
$\irrep{18}\times\irrep{18}$ & = & $\irrep{1}+\irrep{153}+\irrep{170}$\\
$\irrep{153}\times\irrep{18}$ & = & $\irrep{18}+\irrep{816}+\irrep{1920}$\\
$\irrep{170}\times\irrep{18}$ & = & $\irrep{18}+\irrep{1122}+\irrep{1920}$\\
$\irrep{256}\times\irrep{18}$ & = & $\irrepbar{256}+\irrepbar{4352}$\\
\end{longtable}
\begin{longtable}{rcl}
\caption{\label{tab:SO22TensorProducts}SO(22) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SO(22) Tensor Products (continued)}\\
\toprule
\endhead
\midrule
\endfoot
\bottomrule
\endlastfoot
$\irrep{22}\times\irrep{22}$ & = & $\irrep{1}+\irrep{231}+\irrep{252}$\\
$\irrep{231}\times\irrep{22}$ & = & $\irrep{22}+\irrep{1540}+\irrep{3520}$\\
$\irrep{252}\times\irrep{22}$ & = & $\irrep{22}+\irrep{2002}+\irrep{3520}$\\
\end{longtable}
\begin{longtable}{rcl}
\caption{\label{tab:SO26TensorProducts}SO(26) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{SO(26) Tensor Products (continued)}\\
\toprule
\endhead
\midrule
\endfoot
\bottomrule
\endlastfoot
$\irrep{26}\times\irrep{26}$ & = & $\irrep{1}+\irrep{325}+\irrep{350}$\\
$\irrep{325}\times\irrep{26}$ & = & $\irrep{26}+\irrep{2600}+\irrep{5824}$\\
$\irrep{325}\times\irrep{325}$ & = & $\irrep{1}+\irrep{325}+\irrep{350}+\irrep{14950}+\irrep{37674}+\irrep{52325}$\\
$\irrep{2600}\times\irrep{26}$ & = & $\irrep{325}+\irrep{14950}+\irrep{52325}$\\
$\irrep{2600}\times\irrep{325}$ & = & $\irrep{26}+\irrep{2600}+\irrep{5824}+\irrep{65780}+\irrep{320320}+\irrep{450450}$\\
\end{longtable}
\newpage
\subsubsection{\Sp{N}}
\enlargethispage{10pt}
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:Sp4TensorProducts}Sp(4) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{Sp(4) Tensor Products (continued)}\\
\toprule
\endhead
\midrule
\endfoot
\bottomrule
\endlastfoot
$\irrep{4}\times\irrep{4}$ & = & $\irrep{1}+\irrep{5}+\irrep{10}$\\
$\irrep{5}\times\irrep{4}$ & = & $\irrep{4}+\irrep{16}$\\
$\irrep{5}\times\irrep{5}$ & = & $\irrep{1}+\irrep{10}+\irrep{14}$\\
$\irrep{10}\times\irrep{4}$ & = & $\irrep{4}+\irrep{16}+\irrep{20}$\\
$\irrep{10}\times\irrep{5}$ & = & $\irrep{5}+\irrep{10}+\irrep{35}$\\
$\irrep{10}\times\irrep{10}$ & = & $\irrep{1}+\irrep{5}+\irrep{10}+\irrep{14}+\irrep{35}+\irrep[1]{35}$\\
$\irrep{14}\times\irrep{4}$ & = & $\irrep{16}+\irrep{40}$\\
$\irrep{14}\times\irrep{5}$ & = & $\irrep{5}+\irrep{30}+\irrep{35}$\\
$\irrep{14}\times\irrep{10}$ & = & $\irrep{10}+\irrep{14}+\irrep{35}+\irrep{81}$\\
$\irrep{14}\times\irrep{14}$ & = & $\irrep{1}+\irrep{10}+\irrep{14}+\irrep[1]{35}+\irrep{55}+\irrep{81}$\\
$\irrep{16}\times\irrep{4}$ & = & $\irrep{5}+\irrep{10}+\irrep{14}+\irrep{35}$\\
$\irrep{16}\times\irrep{5}$ & = & $\irrep{4}+\irrep{16}+\irrep{20}+\irrep{40}$\\
$\irrep{16}\times\irrep{10}$ & = & $\irrep{4}+2(\irrep{16})+\irrep{20}+\irrep{40}+\irrep{64}$\\
$\irrep{16}\times\irrep{14}$ & = & $\irrep{4}+\irrep{16}+\irrep{20}+\irrep{40}+\irrep{64}+\irrep{80}$\\
$\irrep{16}\times\irrep{16}$ & = & $\irrep{1}+\irrep{5}+2(\irrep{10})+\irrep{14}+\irrep{30}+2(\irrep{35})+\irrep[1]{35}+\irrep{81}$\\
$\irrep{20}\times\irrep{4}$ & = & $\irrep{10}+\irrep{35}+\irrep[1]{35}$\\
$\irrep{20}\times\irrep{5}$ & = & $\irrep{16}+\irrep{20}+\irrep{64}$\\
$\irrep{20}\times\irrep{10}$ & = & $\irrep{4}+\irrep{16}+\irrep{20}+\irrep{40}+\irrep{56}+\irrep{64}$\\
$\irrep{20}\times\irrep{14}$ & = & $\irrep{16}+\irrep{20}+\irrep{40}+\irrep{64}+\irrep{140}$\\
$\irrep{20}\times\irrep{16}$ & = & $\irrep{5}+\irrep{10}+\irrep{14}+2(\irrep{35})+\irrep[1]{35}+\irrep{81}+\irrep{105}$\\
$\irrep{20}\times\irrep{20}$ & = & $\irrep{1}+\irrep{5}+\irrep{10}+\irrep{14}+\irrep{30}+\irrep{35}+\irrep[1]{35}+\irrep{81}+\irrep{84}+\irrep{105}$\\
$\irrep{30}\times\irrep{4}$ & = & $\irrep{40}+\irrep{80}$\\
$\irrep{30}\times\irrep{5}$ & = & $\irrep{14}+\irrep{55}+\irrep{81}$\\
$\irrep{30}\times\irrep{10}$ & = & $\irrep{30}+\irrep{35}+\irrep{81}+\irrep{154}$\\
$\irrep{30}\times\irrep{14}$ & = & $\irrep{5}+\irrep{30}+\irrep{35}+\irrep{91}+\irrep{105}+\irrep{154}$\\
$\irrep{30}\times\irrep{16}$ & = & $\irrep{16}+\irrep{40}+\irrep{64}+\irrep{80}+\irrep{140}+\irrep[1]{140}$\\
$\irrep{30}\times\irrep{20}$ & = & $\irrep{20}+\irrep{40}+\irrep{64}+\irrep{80}+\irrep{140}+\irrep{256}$\\
$\irrep{30}\times\irrep{30}$ & = & $\irrep{1}+\irrep{10}+\irrep{14}+\irrep[1]{35}+\irrep{55}+\irrep{81}+\irrep{84}+\irrep[2]{140}+\irrep{220}+\irrep{260}$\\
$\irrep{35}\times\irrep{4}$ & = & $\irrep{16}+\irrep{20}+\irrep{40}+\irrep{64}$\\
$\irrep[1]{35}\times\irrep{4}$ & = & $\irrep{20}+\irrep{56}+\irrep{64}$\\
$\irrep{35}\times\irrep{5}$ & = & $\irrep{10}+\irrep{14}+\irrep{35}+\irrep[1]{35}+\irrep{81}$\\
$\irrep[1]{35}\times\irrep{5}$ & = & $\irrep{35}+\irrep[1]{35}+\irrep{105}$\\
$\irrep{35}\times\irrep{10}$ & = & $\irrep{5}+\irrep{10}+\irrep{14}+\irrep{30}+2(\irrep{35})+\irrep[1]{35}+\irrep{81}+\irrep{105}$\\
$\irrep[1]{35}\times\irrep{10}$ & = & $\irrep{10}+\irrep{35}+\irrep[1]{35}+\irrep{81}+\irrep{84}+\irrep{105}$\\
$\irrep{35}\times\irrep{14}$ & = & $\irrep{5}+\irrep{10}+\irrep{30}+2(\irrep{35})+\irrep[1]{35}+\irrep{81}+\irrep{105}+\irrep{154}$\\
$\irrep[1]{35}\times\irrep{14}$ & = & $\irrep{14}+\irrep{35}+\irrep[1]{35}+\irrep{81}+\irrep{105}+\irrep{220}$\\
$\irrep{35}\times\irrep{16}$ & = & $\irrep{4}+2(\irrep{16})+2(\irrep{20})+2(\irrep{40})+\irrep{56}+2(\irrep{64})+\irrep{80}+\irrep{140}$\\
$\irrep[1]{35}\times\irrep{16}$ & = & $\irrep{16}+\irrep{20}+\irrep{40}+\irrep{56}+2(\irrep{64})+\irrep{140}+\irrep{160}$\\
$\irrep{35}\times\irrep{20}$ & = & $\irrep{4}+2(\irrep{16})+\irrep{20}+2(\irrep{40})+\irrep{56}+2(\irrep{64})+\irrep{80}+\irrep{140}+\irrep{160}$\\
$\irrep[1]{35}\times\irrep{20}$ & = & $\irrep{4}+\irrep{16}+\irrep{20}+\irrep{40}+\irrep{56}+\irrep{64}+\irrep{80}+\irrep{120}+\irrep{140}+\irrep{160}$\\
$\irrep{35}\times\irrep{30}$ & = & $\irrep{10}+\irrep{14}+\irrep{35}+\irrep[1]{35}+\irrep{55}+2(\irrep{81})+\irrep{105}+\irrep{154}+\irrep{220}+\irrep{260}$\\
$\irrep[1]{35}\times\irrep{30}$ & = & $\irrep{30}+\irrep{35}+\irrep[1]{35}+\irrep{81}+\irrep{105}+\irrep{154}+\irrep{220}+\irrep{390}$\\
$\irrep{35}\times\irrep{35}$ & = & $\irrep{1}+\irrep{5}+2(\irrep{10})+2(\irrep{14})+\irrep{30}+3(\irrep{35})+2(\irrep[1]{35})+\irrep{55}+3(\irrep{81})+\irrep{84}+2(\irrep{105})+\irrep{154}+\irrep{220}$\\
$\irrep[1]{35}\times\irrep{35}$ & = & $\irrep{5}+\irrep{10}+\irrep{14}+\irrep{30}+2(\irrep{35})+\irrep[1]{35}+2(\irrep{81})+\irrep{84}+2(\irrep{105})+\irrep{154}+\irrep{220}+\irrep{231}$\\
$\irrep[1]{35}\times\irrep[1]{35}$ & = & $\irrep{1}+\irrep{5}+\irrep{10}+\irrep{14}+\irrep{30}+\irrep{35}+\irrep[1]{35}+\irrep{55}+\irrep{81}+\irrep{84}+\irrep{105}+\irrep{154}+\irrep{165}+\irrep{220}+\irrep{231}$\\
\end{longtable}
\newpage
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:Sp6TensorProducts}Sp(6) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{Sp(6) Tensor Products (continued)}\\
\toprule
\endhead
\midrule
\endfoot
\bottomrule
\endlastfoot
$\irrep{6}\times\irrep{6}$ & = & $\irrep{1}+\irrep{14}+\irrep{21}$\\
$\irrep[1]{14}\times\irrep{6}$ & = & $\irrep{14}+\irrep{70}$\\
$\irrep{14}\times\irrep{6}$ & = & $\irrep{6}+\irrep[1]{14}+\irrep{64}$\\
$\irrep[1]{14}\times\irrep[1]{14}$ & = & $\irrep{1}+\irrep{21}+\irrep{84}+\irrep{90}$\\
$\irrep[1]{14}\times\irrep{14}$ & = & $\irrep{6}+\irrep{64}+\irrep{126}$\\
$\irrep{14}\times\irrep{14}$ & = & $\irrep{1}+\irrep{14}+\irrep{21}+\irrep{70}+\irrep{90}$\\
$\irrep{21}\times\irrep{6}$ & = & $\irrep{6}+\irrep{56}+\irrep{64}$\\
$\irrep{21}\times\irrep[1]{14}$ & = & $\irrep[1]{14}+\irrep{64}+\irrep{216}$\\
$\irrep{21}\times\irrep{14}$ & = & $\irrep{14}+\irrep{21}+\irrep{70}+\irrep{189}$\\
$\irrep{21}\times\irrep{21}$ & = & $\irrep{1}+\irrep{14}+\irrep{21}+\irrep{90}+\irrep[1]{126}+\irrep{189}$\\
$\irrep{56}\times\irrep{6}$ & = & $\irrep{21}+\irrep[1]{126}+\irrep{189}$\\
$\irrep{56}\times\irrep[1]{14}$ & = & $\irrep{70}+\irrep{189}+\irrep{525}$\\
$\irrep{56}\times\irrep{14}$ & = & $\irrep{56}+\irrep{64}+\irrep{216}+\irrep{448}$\\
$\irrep{56}\times\irrep{21}$ & = & $\irrep{6}+\irrep{56}+\irrep{64}+\irrep{252}+\irrep{350}+\irrep{448}$\\
$\irrep{56}\times\irrep{56}$ & = & $\irrep{1}+\irrep{14}+\irrep{21}+\irrep{90}+\irrep[1]{126}+\irrep{189}+\irrep{385}+\irrep{462}+\irrep{924}+\irrep[1]{924}$\\
$\irrep{64}\times\irrep{6}$ & = & $\irrep{14}+\irrep{21}+\irrep{70}+\irrep{90}+\irrep{189}$\\
$\irrep{64}\times\irrep[1]{14}$ & = & $\irrep{14}+\irrep{21}+\irrep{70}+\irrep{90}+\irrep{189}+\irrep{512}$\\
$\irrep{64}\times\irrep{14}$ & = & $\irrep{6}+\irrep[1]{14}+\irrep{56}+2(\irrep{64})+\irrep{126}+\irrep{216}+\irrep{350}$\\
$\irrep{64}\times\irrep{21}$ & = & $\irrep{6}+\irrep[1]{14}+\irrep{56}+2(\irrep{64})+\irrep{126}+\irrep{216}+\irrep{350}+\irrep{448}$\\
$\irrep{64}\times\irrep{56}$ & = & $\irrep{14}+\irrep{21}+\irrep{70}+\irrep{90}+\irrep[1]{126}+2(\irrep{189})+\irrep{512}+\irrep{525}+\irrep{924}+\irrep[1]{924}$\\
$\irrep{64}\times\irrep{64}$ & = & $\irrep{1}+2(\irrep{14})+2(\irrep{21})+3(\irrep{70})+\irrep{84}+2(\irrep{90})+\irrep[1]{126}+3(\irrep{189})+\irrep{385}+2(\irrep{512})+\irrep{525}+\irrep{924}$\\
$\irrep{70}\times\irrep{6}$ & = & $\irrep[1]{14}+\irrep{64}+\irrep{126}+\irrep{216}$\\
$\irrep{70}\times\irrep[1]{14}$ & = & $\irrep{6}+\irrep{56}+\irrep{64}+\irrep{126}+\irrep{350}+\irrep{378}$\\
$\irrep{70}\times\irrep{14}$ & = & $\irrep{14}+\irrep{21}+\irrep{70}+\irrep{84}+\irrep{90}+\irrep{189}+\irrep{512}$\\
$\irrep{70}\times\irrep{21}$ & = & $\irrep{14}+2(\irrep{70})+\irrep{90}+\irrep{189}+\irrep{512}+\irrep{525}$\\
$\irrep{70}\times\irrep{56}$ & = & $\irrep[1]{14}+\irrep{64}+\irrep{126}+2(\irrep{216})+\irrep{350}+\irrep{448}+\irrep{1100}+\irrep{1386}$\\
$\irrep{70}\times\irrep{64}$ & = & $\irrep{6}+\irrep[1]{14}+\irrep{56}+3(\irrep{64})+2(\irrep{126})+2(\irrep{216})+2(\irrep{350})+\irrep{378}+\irrep{448}+\irrep{616}+\irrep{1386}$\\
$\irrep{70}\times\irrep{70}$ & = & $\irrep{1}+\irrep{14}+2(\irrep{21})+\irrep{70}+\irrep{84}+2(\irrep{90})+\irrep[1]{126}+2(\irrep{189})+\irrep{385}+2(\irrep{512})+\irrep{594}+\irrep{924}+\irrep{1078}$\\
$\irrep{84}\times\irrep{6}$ & = & $\irrep{126}+\irrep{378}$\\
$\irrep{84}\times\irrep[1]{14}$ & = & $\irrep[1]{14}+\irrep{216}+\irrep{330}+\irrep{616}$\\
$\irrep{84}\times\irrep{14}$ & = & $\irrep{70}+\irrep{512}+\irrep{594}$\\
$\irrep{84}\times\irrep{21}$ & = & $\irrep{84}+\irrep{90}+\irrep{512}+\irrep{1078}$\\
$\irrep{84}\times\irrep{56}$ & = & $\irrep{126}+\irrep{350}+\irrep{378}+\irrep{1386}+\irrep{2464}$\\
$\irrep{84}\times\irrep{64}$ & = & $\irrep{64}+\irrep{126}+\irrep{216}+\irrep{350}+\irrep{378}+\irrep{616}+\irrep{1386}+\irrep{2240}$\\
$\irrep{84}\times\irrep{70}$ & = & $\irrep{14}+\irrep{70}+\irrep{189}+\irrep{385}+\irrep{512}+\irrep{525}+\irrep{594}+\irrep[1]{1386}+\irrep{2205}$\\
$\irrep{84}\times\irrep{84}$ & = & $\irrep{1}+\irrep{21}+\irrep{84}+\irrep{90}+\irrep[1]{126}+\irrep{924}+\irrep{1001}+\irrep{1078}+\irrep{1274}+\irrep{2457}$\\
$\irrep{90}\times\irrep{6}$ & = & $\irrep{64}+\irrep{126}+\irrep{350}$\\
$\irrep{90}\times\irrep[1]{14}$ & = & $\irrep[1]{14}+\irrep{64}+\irrep{216}+\irrep{350}+\irrep{616}$\\
$\irrep{90}\times\irrep{14}$ & = & $\irrep{14}+\irrep{70}+\irrep{90}+\irrep{189}+\irrep{385}+\irrep{512}$\\
$\irrep{90}\times\irrep{21}$ & = & $\irrep{21}+\irrep{70}+\irrep{84}+\irrep{90}+\irrep{189}+\irrep{512}+\irrep{924}$\\
$\irrep{90}\times\irrep{56}$ & = & $\irrep{56}+\irrep{64}+\irrep{126}+\irrep{216}+\irrep{350}+\irrep{378}+\irrep{448}+\irrep{1386}+\irrep{2016}$\\
$\irrep{90}\times\irrep{64}$ & = & $\irrep{6}+\irrep[1]{14}+\irrep{56}+2(\irrep{64})+2(\irrep{126})+2(\irrep{216})+2(\irrep{350})+\irrep{378}+\irrep{448}+\irrep{616}+\irrep{1344}+\irrep{1386}$\\
$\irrep{90}\times\irrep{70}$ & = & $\irrep{14}+\irrep{21}+2(\irrep{70})+\irrep{90}+2(\irrep{189})+\irrep{385}+2(\irrep{512})+\irrep{525}+\irrep{594}+\irrep{924}+\irrep{2205}$\\
$\irrep{90}\times\irrep{84}$ & = & $\irrep{21}+\irrep{84}+\irrep{90}+\irrep{189}+\irrep{512}+\irrep{924}+\irrep{1078}+\irrep{2205}+\irrep{2457}$\\
$\irrep{90}\times\irrep{90}$ & = & $\irrep{1}+\irrep{14}+\irrep{21}+\irrep{70}+\irrep{84}+2(\irrep{90})+\irrep[1]{126}+\irrep{189}+\irrep{385}+2(\irrep{512})+\irrep{525}+\irrep{924}+\irrep{1078}+\irrep{1274}+\irrep{2205}$\\
\end{longtable}
\newpage
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:Sp8TensorProducts}Sp(8) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{Sp(8) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrep{8}\times\irrep{8}$ & = & $\irrep{1}+\irrep{27}+\irrep{36}$\\
$\irrep{27}\times\irrep{8}$ & = & $\irrep{8}+\irrep{48}+\irrep{160}$\\
$\irrep{27}\times\irrep{27}$ & = & $\irrep{1}+\irrep{27}+\irrep{36}+\irrep{42}+\irrep{308}+\irrep{315}$\\
$\irrep{36}\times\irrep{8}$ & = & $\irrep{8}+\irrep{120}+\irrep{160}$\\
$\irrep{36}\times\irrep{27}$ & = & $\irrep{27}+\irrep{36}+\irrep{315}+\irrep{594}$\\
$\irrep{36}\times\irrep{36}$ & = & $\irrep{1}+\irrep{27}+\irrep{36}+\irrep{308}+\irrep{330}+\irrep{594}$\\
$\irrep{42}\times\irrep{8}$ & = & $\irrep{48}+\irrep{288}$\\
$\irrep{42}\times\irrep{27}$ & = & $\irrep{27}+\irrep{315}+\irrep[1]{792}$\\
$\irrep{42}\times\irrep{36}$ & = & $\irrep{42}+\irrep{315}+\irrep{1155}$\\
$\irrep{42}\times\irrep{42}$ & = & $\irrep{1}+\irrep{36}+\irrep{308}+\irrep[1]{594}+\irrep{825}$\\
$\irrep{48}\times\irrep{8}$ & = & $\irrep{27}+\irrep{42}+\irrep{315}$\\
$\irrep{48}\times\irrep{27}$ & = & $\irrep{8}+\irrep{48}+\irrep{160}+\irrep{288}+\irrep{792}$\\
$\irrep{48}\times\irrep{36}$ & = & $\irrep{48}+\irrep{160}+\irrep{288}+\irrep{1232}$\\
$\irrep{48}\times\irrep{42}$ & = & $\irrep{8}+\irrep{160}+\irrep{792}+\irrep{1056}$\\
$\irrep{48}\times\irrep{48}$ & = & $\irrep{1}+\irrep{27}+\irrep{36}+\irrep{308}+\irrep{315}+\irrep[1]{792}+\irrep{825}$\\
$\irrep{120}\times\irrep{8}$ & = & $\irrep{36}+\irrep{330}+\irrep{594}$\\
$\irrep{120}\times\irrep{27}$ & = & $\irrep{120}+\irrep{160}+\irrep{1232}+\irrep{1728}$\\
$\irrep{120}\times\irrep{36}$ & = & $\irrep{8}+\irrep{120}+\irrep{160}+\irrep[2]{792}+\irrep{1512}+\irrep{1728}$\\
$\irrep{120}\times\irrep{42}$ & = & $\irrep{288}+\irrep{1232}+\irrep{3520}$\\
$\irrep{120}\times\irrep{48}$ & = & $\irrep{315}+\irrep{594}+\irrep{1155}+\irrep{3696}$\\
$\irrep{120}\times\irrep{120}$ & = & $\irrep{1}+\irrep{27}+\irrep{36}+\irrep{308}+\irrep{330}+\irrep{594}+\irrep{1716}+\irrep{2184}+\irrep{4290}+\irrep{4914}$\\
$\irrep{160}\times\irrep{8}$ & = & $\irrep{27}+\irrep{36}+\irrep{308}+\irrep{315}+\irrep{594}$\\
$\irrep{160}\times\irrep{27}$ & = & $\irrep{8}+\irrep{48}+\irrep{120}+2(\irrep{160})+\irrep{288}+\irrep{792}+\irrep{1232}+\irrep{1512}$\\
$\irrep{160}\times\irrep{36}$ & = & $\irrep{8}+\irrep{48}+\irrep{120}+2(\irrep{160})+\irrep{792}+\irrep{1232}+\irrep{1512}+\irrep{1728}$\\
$\irrep{160}\times\irrep{42}$ & = & $\irrep{48}+\irrep{160}+\irrep{288}+\irrep{792}+\irrep{1232}+\irrep{4200}$\\
$\irrep{160}\times\irrep{48}$ & = & $\irrep{27}+\irrep{36}+\irrep{42}+\irrep{308}+2(\irrep{315})+\irrep{594}+\irrep[1]{792}+\irrep{1155}+\irrep{4096}$\\
$\irrep{160}\times\irrep{120}$ & = & $\irrep{27}+\irrep{36}+\irrep{308}+\irrep{315}+\irrep{330}+2(\irrep{594})+\irrep{3696}+\irrep{4096}+\irrep{4290}+\irrep{4914}$\\
$\irrep{160}\times\irrep{160}$ & = & $\irrep{1}+2(\irrep{27})+2(\irrep{36})+\irrep{42}+2(\irrep{308})+3(\irrep{315})+\irrep{330}+3(\irrep{594})+\irrep[1]{792}+\irrep{825}+\irrep{1155}+\irrep{2184}+\irrep{3696}+2(\irrep{4096})+\irrep{4914}$\\
$\irrep{288}\times\irrep{8}$ & = & $\irrep{42}+\irrep{315}+\irrep[1]{792}+\irrep{1155}$\\
$\irrep{288}\times\irrep{27}$ & = & $\irrep{48}+\irrep{160}+\irrep{288}+\irrep{792}+\irrep{1056}+\irrep{1232}+\irrep{4200}$\\
$\irrep{288}\times\irrep{36}$ & = & $\irrep{48}+2(\irrep{288})+\irrep{792}+\irrep{1232}+\irrep{3520}+\irrep{4200}$\\
$\irrep{288}\times\irrep{42}$ & = & $\irrep{8}+\irrep{120}+\irrep{160}+\irrep{792}+\irrep{1056}+\irrep{1512}+\irrep[1]{3696}+\irrep{4752}$\\
$\irrep{288}\times\irrep{48}$ & = & $\irrep{27}+\irrep{36}+\irrep{308}+\irrep{315}+\irrep{594}+\irrep[1]{594}+\irrep[1]{792}+\irrep{825}+\irrep{4096}+\irrep{6237}$\\
$\irrep{308}\times\irrep{8}$ & = & $\irrep{160}+\irrep{792}+\irrep{1512}$\\
$\irrep{308}\times\irrep{27}$ & = & $\irrep{27}+\irrep{308}+\irrep{315}+\irrep{594}+\irrep[1]{792}+\irrep{2184}+\irrep{4096}$\\
$\irrep{308}\times\irrep{36}$ & = & $\irrep{36}+\irrep{308}+\irrep{315}+\irrep{594}+\irrep{825}+\irrep{4096}+\irrep{4914}$\\
$\irrep{308}\times\irrep{42}$ & = & $\irrep{42}+\irrep{308}+\irrep{315}+\irrep{1155}+\irrep{4096}+\irrep{7020}$\\
$\irrep{308}\times\irrep{48}$ & = & $\irrep{48}+\irrep{160}+\irrep{288}+\irrep{792}+\irrep{1232}+\irrep{1512}+\irrep{4200}+\irrep{6552}$\\
\end{longtable}
\newpage
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:Sp10TensorProducts}Sp(10) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{Sp(10) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrep{10}\times\irrep{10}$ & = & $\irrep{1}+\irrep{44}+\irrep{55}$\\
$\irrep{44}\times\irrep{10}$ & = & $\irrep{10}+\irrep{110}+\irrep{320}$\\
$\irrep{44}\times\irrep{44}$ & = & $\irrep{1}+\irrep{44}+\irrep{55}+\irrep{165}+\irrep{780}+\irrep{891}$\\
$\irrep{55}\times\irrep{10}$ & = & $\irrep{10}+\irrep{220}+\irrep{320}$\\
$\irrep{55}\times\irrep{44}$ & = & $\irrep{44}+\irrep{55}+\irrep{891}+\irrep{1430}$\\
$\irrep{55}\times\irrep{55}$ & = & $\irrep{1}+\irrep{44}+\irrep{55}+\irrep{715}+\irrep{780}+\irrep{1430}$\\
$\irrep{110}\times\irrep{10}$ & = & $\irrep{44}+\irrep{165}+\irrep{891}$\\
$\irrep{110}\times\irrep{44}$ & = & $\irrep{10}+\irrep{110}+\irrep{132}+\irrep{320}+\irrep{1408}+\irrep{2860}$\\
$\irrep{110}\times\irrep{55}$ & = & $\irrep{110}+\irrep{320}+\irrep{1408}+\irrep{4212}$\\
$\irrep{110}\times\irrep{110}$ & = & $\irrep{1}+\irrep{44}+\irrep{55}+\irrep{165}+\irrep{780}+\irrep{891}+\irrep{1155}+\irrep{4004}+\irrep{5005}$\\
$\irrep{132}\times\irrep{10}$ & = & $\irrep{165}+\irrep{1155}$\\
$\irrep{132}\times\irrep{44}$ & = & $\irrep{110}+\irrep{1408}+\irrep{4290}$\\
$\irrep{132}\times\irrep{55}$ & = & $\irrep{132}+\irrep{1408}+\irrep{5720}$\\
$\irrep{132}\times\irrep{110}$ & = & $\irrep{44}+\irrep{891}+\irrep{5005}+\irrep{8580}$\\
$\irrep{132}\times\irrep{132}$ & = & $\irrep{1}+\irrep{55}+\irrep{780}+\irrep{4004}+\irrep{4719}+\irrep{7865}$\\
$\irrep{165}\times\irrep{10}$ & = & $\irrep{110}+\irrep{132}+\irrep{1408}$\\
$\irrep{165}\times\irrep{44}$ & = & $\irrep{44}+\irrep{165}+\irrep{891}+\irrep{1155}+\irrep{5005}$\\
$\irrep{165}\times\irrep{55}$ & = & $\irrep{165}+\irrep{891}+\irrep{1155}+\irrep{6864}$\\
$\irrep{165}\times\irrep{110}$ & = & $\irrep{10}+\irrep{110}+\irrep{320}+\irrep{1408}+\irrep{2860}+\irrep{4290}+\irrep{9152}$\\
$\irrep{165}\times\irrep{132}$ & = & $\irrep{10}+\irrep{320}+\irrep{2860}+\irrep{9152}+\irrep{9438}$\\
$\irrep{165}\times\irrep{165}$ & = & $\irrep{1}+\irrep{44}+\irrep{55}+\irrep{780}+\irrep{891}+\irrep{4004}+\irrep{5005}+\irrep{7865}+\irrep{8580}$\\
$\irrep{220}\times\irrep{10}$ & = & $\irrep{55}+\irrep{715}+\irrep{1430}$\\
$\irrep{220}\times\irrep{44}$ & = & $\irrep{220}+\irrep{320}+\irrep{4212}+\irrep{4928}$\\
$\irrep{220}\times\irrep{55}$ & = & $\irrep{10}+\irrep{220}+\irrep{320}+\irrep{2002}+\irrep{4620}+\irrep{4928}$\\
$\irrep{220}\times\irrep{110}$ & = & $\irrep{891}+\irrep{1430}+\irrep{6864}+\irrep{15015}$\\
$\irrep{220}\times\irrep{132}$ & = & $\irrep{1155}+\irrep{6864}+\irrep{21021}$\\
$\irrep{220}\times\irrep{165}$ & = & $\irrep{1408}+\irrep{4212}+\irrep{5720}+\irrep{24960}$\\
$\irrep{220}\times\irrep{220}$ & = & $\irrep{1}+\irrep{44}+\irrep{55}+\irrep{715}+\irrep{780}+\irrep{1430}+\irrep[1]{5005}+\irrep{8250}+\irrep{14300}+\irrep{17820}$\\
$\irrep{320}\times\irrep{10}$ & = & $\irrep{44}+\irrep{55}+\irrep{780}+\irrep{891}+\irrep{1430}$\\
$\irrep{320}\times\irrep{44}$ & = & $\irrep{10}+\irrep{110}+\irrep{220}+2(\irrep{320})+\irrep{1408}+\irrep{2860}+\irrep{4212}+\irrep{4620}$\\
$\irrep{320}\times\irrep{55}$ & = & $\irrep{10}+\irrep{110}+\irrep{220}+2(\irrep{320})+\irrep{2860}+\irrep{4212}+\irrep{4620}+\irrep{4928}$\\
$\irrep{320}\times\irrep{110}$ & = & $\irrep{44}+\irrep{55}+\irrep{165}+\irrep{780}+2(\irrep{891})+\irrep{1155}+\irrep{1430}+\irrep{5005}+\irrep{6864}+\irrep{17920}$\\
$\irrep{320}\times\irrep{132}$ & = & $\irrep{165}+\irrep{891}+\irrep{1155}+\irrep{5005}+\irrep{6864}+\irrep{28160}$\\
$\irrep{320}\times\irrep{165}$ & = & $\irrep{110}+\irrep{132}+\irrep{320}+2(\irrep{1408})+\irrep{2860}+\irrep{4212}+\irrep{4290}+\irrep{5720}+\irrep{32340}$\\
$\irrep{320}\times\irrep{220}$ & = & $\irrep{44}+\irrep{55}+\irrep{715}+\irrep{780}+\irrep{891}+2(\irrep{1430})+\irrep{14300}+\irrep{15015}+\irrep{17820}+\irrep{17920}$\\
$\irrep{320}\times\irrep{320}$ & = & $\irrep{1}+2(\irrep{44})+2(\irrep{55})+\irrep{165}+\irrep{715}+2(\irrep{780})+3(\irrep{891})+3(\irrep{1430})+\irrep{4004}+\irrep{5005}+\irrep{6864}+\irrep{8250}+\irrep{15015}+\irrep{17820}+2(\irrep{17920})$\\
\end{longtable}
\newpage
\begin{longtable}{rcl}
\caption{\label{tab:Sp12TensorProducts}Sp(12) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{Sp(12) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrep{12}\times\irrep{12}$ & = & $\irrep{1}+\irrep{65}+\irrep{78}$\\
$\irrep{65}\times\irrep{12}$ & = & $\irrep{12}+\irrep{208}+\irrep{560}$\\
$\irrep{65}\times\irrep{65}$ & = & $\irrep{1}+\irrep{65}+\irrep{78}+\irrep{429}+\irrep{1650}+\irrep{2002}$\\
$\irrep{78}\times\irrep{12}$ & = & $\irrep{12}+\irrep{364}+\irrep{560}$\\
$\irrep{78}\times\irrep{65}$ & = & $\irrep{65}+\irrep{78}+\irrep{2002}+\irrep{2925}$\\
$\irrep{78}\times\irrep{78}$ & = & $\irrep{1}+\irrep{65}+\irrep{78}+\irrep{1365}+\irrep{1650}+\irrep{2925}$\\
$\irrep{208}\times\irrep{12}$ & = & $\irrep{65}+\irrep{429}+\irrep{2002}$\\
$\irrep{208}\times\irrep{65}$ & = & $\irrep{12}+\irrep{208}+\irrep{560}+\irrep{572}+\irrep{4368}+\irrep{7800}$\\
$\irrep{208}\times\irrep{78}$ & = & $\irrep{208}+\irrep{560}+\irrep{4368}+\irrep{11088}$\\
$\irrep{208}\times\irrep{208}$ & = & $\irrep{1}+\irrep{65}+\irrep{78}+\irrep{429}+\irrep[1]{429}+\irrep{1650}+\irrep{2002}+\irrep{6006}+\irrep{13650}+\irrep{18954}$\\
$\irrep{364}\times\irrep{12}$ & = & $\irrep{78}+\irrep{1365}+\irrep{2925}$\\
$\irrep{364}\times\irrep{65}$ & = & $\irrep{364}+\irrep{560}+\irrep{11088}+\irrep{11648}$\\
$\irrep{364}\times\irrep{78}$ & = & $\irrep{12}+\irrep{364}+\irrep{560}+\irrep[1]{4368}+\irrep{11440}+\irrep{11648}$\\
$\irrep[1]{429}\times\irrep{12}$ & = & $\irrep{572}+\irrep{4576}$\\
$\irrep{429}\times\irrep{12}$ & = & $\irrep{208}+\irrep{572}+\irrep{4368}$\\
$\irrep[1]{429}\times\irrep{65}$ & = & $\irrep{429}+\irrep{6006}+\irrep{21450}$\\
$\irrep{429}\times\irrep{65}$ & = & $\irrep{65}+\irrep{429}+\irrep[1]{429}+\irrep{2002}+\irrep{6006}+\irrep{18954}$\\
\end{longtable}
\begin{longtable}{rcl}
\caption{\label{tab:Sp14TensorProducts}Sp(14) Tensor Products}\\
\toprule
\endfirsthead
\caption[]{Sp(14) Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrep{14}\times\irrep{14}$ & = & $\irrep{1}+\irrep{90}+\irrep{105}$\\
$\irrep{90}\times\irrep{14}$ & = & $\irrep{14}+\irrep{350}+\irrep{896}$\\
$\irrep{90}\times\irrep{90}$ & = & $\irrep{1}+\irrep{90}+\irrep{105}+\irrep{910}+\irrep{3094}+\irrep{3900}$\\
$\irrep{105}\times\irrep{14}$ & = & $\irrep{14}+\irrep{560}+\irrep{896}$\\
$\irrep{105}\times\irrep{90}$ & = & $\irrep{90}+\irrep{105}+\irrep{3900}+\irrep{5355}$\\
$\irrep{105}\times\irrep{105}$ & = & $\irrep{1}+\irrep{90}+\irrep{105}+\irrep{2380}+\irrep{3094}+\irrep{5355}$\\
$\irrep{350}\times\irrep{14}$ & = & $\irrep{90}+\irrep{910}+\irrep{3900}$\\
$\irrep{560}\times\irrep{14}$ & = & $\irrep{105}+\irrep{2380}+\irrep{5355}$\\
$\irrep{896}\times\irrep{14}$ & = & $\irrep{90}+\irrep{105}+\irrep{3094}+\irrep{3900}+\irrep{5355}$\\
\end{longtable}
\newpage
\subsubsection{Exceptional Algebras}
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:E6TensorProducts}\E6 Tensor Products}\\
\toprule
\endfirsthead
\caption[]{\E6 Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrepbar{27}\times\irrep{27}$ & = & $\irrep{1}+\irrep{78}+\irrep{650}$\\
$\irrep{27}\times\irrep{27}$ & = & $\irrepbar{27}+\irrepbar{351}+\irrepbar[1]{351}$\\
$\irrep{78}\times\irrep{27}$ & = & $\irrep{27}+\irrep{351}+\irrep{1728}$\\
$\irrep{78}\times\irrep{78}$ & = & $\irrep{1}+\irrep{78}+\irrep{650}+\irrep{2430}+\irrep{2925}$\\
$\irrep[1]{351}\times\irrep{27}$ & = & $\irrepbar{27}+\irrepbar{1728}+\irrepbar{7722}$\\
$\irrep{351}\times\irrep{27}$ & = & $\irrepbar{27}+\irrepbar{351}+\irrepbar{1728}+\irrepbar{7371}$\\
$\irrepbar{351}\times\irrep{27}$ & = & $\irrep{78}+\irrep{650}+\irrep{2925}+\irrep{5824}$\\
$\irrepbar[1]{351}\times\irrep{27}$ & = & $\irrep{650}+\irrep{3003}+\irrep{5824}$\\
$\irrep[1]{351}\times\irrep{78}$ & = & $\irrep{351}+\irrep[1]{351}+\irrep{7371}+\irrep{19305}$\\
$\irrep{351}\times\irrep{78}$ & = & $\irrep{27}+\irrep{351}+\irrep[1]{351}+\irrep{1728}+\irrep{7371}+\irrep{17550}$\\
$\irrep[1]{351}\times\irrep[1]{351}$ & = & $\irrepbar[1]{351}+\irrepbar{7371}+\irrepbar{7722}+\irrepbar[1]{19305}+\irrepbar{34398}+\irrepbar{54054}$\\
$\irrep[1]{351}\times\irrep{351}$ & = & $\irrepbar{351}+\irrepbar{1728}+\irrepbar{7371}+\irrepbar{7722}+\irrepbar{51975}+\irrepbar{54054}$\\
$\irrep{351}\times\irrep{351}$ & = & $\irrepbar{27}+\irrepbar{351}+\irrepbar[1]{351}+2(\irrepbar{1728})+\irrepbar{7371}+\irrepbar{7722}+\irrepbar{17550}+\irrepbar{34398}+\irrepbar{51975}$\\
$\irrepbar{351}\times\irrep{351}$ & = & $\irrep{1}+\irrep{78}+2(\irrep{650})+\irrep{2430}+\irrep{2925}+\irrep{5824}+\irrepbar{5824}+\irrep{34749}+\irrep{70070}$\\
$\irrepbar[1]{351}\times\irrep[1]{351}$ & = & $\irrep{1}+\irrep{78}+\irrep{650}+\irrep{2430}+\irrep{34749}+\irrep{85293}$\\
$\irrepbar[1]{351}\times\irrep{351}$ & = & $\irrep{78}+\irrep{650}+\irrep{2925}+\irrep{5824}+\irrep{34749}+\irrep{78975}$\\
$\irrep{650}\times\irrep{27}$ & = & $\irrep{27}+\irrep{351}+\irrep[1]{351}+\irrep{1728}+\irrep{7371}+\irrep{7722}$\\
$\irrep{650}\times\irrep{78}$ & = & $\irrep{78}+2(\irrep{650})+\irrep{2925}+\irrep{5824}+\irrepbar{5824}+\irrep{34749}$\\
$\irrep{650}\times\irrep[1]{351}$ & = & $\irrep{27}+\irrep{351}+\irrep[1]{351}+\irrep{1728}+\irrep{7371}+\irrep{7722}+\irrep{17550}+\irrep{19305}+\irrep{61425}+\irrep{112320}$\\
$\irrep{650}\times\irrep{351}$ & = & $\irrep{27}+2(\irrep{351})+\irrep[1]{351}+2(\irrep{1728})+2(\irrep{7371})+\irrep{7722}+\irrep{17550}+\irrep{19305}+\irrep{51975}+\irrep{112320}$\\
$\irrep{650}\times\irrep{650}$ & = & $\irrep{1}+2(\irrep{78})+3(\irrep{650})+\irrep{2430}+2(\irrep{2925})+\irrep{3003}+\irrepbar{3003}+2(\irrep{5824})+2(\irrepbar{5824})+2(\irrep{34749})+\irrep{70070}+\irrep{78975}+\irrepbar{78975}+\irrep{85293}$\\
$\irrepbar{1728}\times\irrep{27}$ & = & $\irrep{78}+\irrep{650}+\irrep{2430}+\irrep{2925}+\irrepbar{5824}+\irrep{34749}$\\
$\irrep{1728}\times\irrep{27}$ & = & $\irrepbar{351}+\irrepbar[1]{351}+\irrepbar{1728}+\irrepbar{7371}+\irrepbar{17550}+\irrepbar{19305}$\\
$\irrep{1728}\times\irrep{78}$ & = & $\irrep{27}+\irrep{351}+2(\irrep{1728})+\irrep{7371}+\irrep{7722}+\irrep{17550}+\irrep{46332}+\irrep{51975}$\\
$\irrepbar{1728}\times\irrep[1]{351}$ & = & $\irrep{650}+\irrep{2925}+\irrepbar{3003}+\irrep{5824}+2(\irrepbar{5824})+\irrep{34749}+\irrep{70070}+\irrepbar{78975}+\irrepbar{146432}+\irrepbar{252252}$\\
$\irrepbar{1728}\times\irrep{351}$ & = & $\irrep{78}+2(\irrep{650})+\irrep{2430}+2(\irrep{2925})+\irrepbar{3003}+\irrep{5824}+2(\irrepbar{5824})+2(\irrep{34749})+\irrep{70070}+\irrepbar{78975}+\irrep{105600}+\irrepbar{252252}$\\
$\irrep{1728}\times\irrep[1]{351}$ & = & $\irrepbar{27}+\irrepbar{351}+2(\irrepbar{1728})+\irrepbar{7371}+\irrepbar{7722}+\irrepbar{17550}+\irrepbar{46332}+\irrepbar{51975}+\irrepbar{112320}+\irrepbar{359424}$\\
$\irrep{1728}\times\irrep{351}$ & = & $\irrepbar{27}+2(\irrepbar{351})+\irrepbar[1]{351}+2(\irrepbar{1728})+2(\irrepbar{7371})+\irrepbar{7722}+2(\irrepbar{17550})+\irrepbar{19305}+\irrepbar{46332}+\irrepbar{51975}+\irrepbar{112320}+\irrepbar{314496}$\\
$\irrep{1728}\times\irrep{650}$ & = & $\irrep{27}+2(\irrep{351})+\irrep[1]{351}+3(\irrep{1728})+3(\irrep{7371})+2(\irrep{7722})+2(\irrep{17550})+\irrep{19305}+\irrep{34398}+\irrep{46332}+2(\irrep{51975})+\irrep{54054}+\irrep{112320}+\irrep{314496}+\irrep{359424}$\\
$\irrepbar{1728}\times\irrep{1728}$ & = & $\irrep{1}+2(\irrep{78})+3(\irrep{650})+2(\irrep{2430})+3(\irrep{2925})+2(\irrep{5824})+2(\irrepbar{5824})+4(\irrep{34749})+\irrep{43758}+2(\irrep{70070})+\irrep{78975}+\irrepbar{78975}+\irrep{85293}+2(\irrep{105600})+\irrep{252252}+\irrepbar{252252}+\irrep{812175}+\irrep{852930}$\\
$\irrep{1728}\times\irrep{1728}$ & = & $\irrepbar{27}+2(\irrepbar{351})+2(\irrepbar[1]{351})+2(\irrepbar{1728})+4(\irrepbar{7371})+\irrepbar{7722}+3(\irrepbar{17550})+3(\irrepbar{19305})+\irrepbar{34398}+\irrepbar{46332}+2(\irrepbar{51975})+\irrepbar{61425}+2(\irrepbar{112320})+2(\irrepbar{314496})+\irrepbar{386100}+\irrepbar{393822}+\irrepbar{459459}+\irrepbar{494208}$\\
$\irrep{2430}\times\irrep{27}$ & = & $\irrep{1728}+\irrep{17550}+\irrep{46332}$\\
$\irrep{2430}\times\irrep{78}$ & = & $\irrep{78}+\irrep{2430}+\irrep{2925}+\irrep{34749}+\irrep{43758}+\irrep{105600}$\\
$\irrep{2430}\times\irrep[1]{351}$ & = & $\irrep[1]{351}+\irrep{7371}+\irrep{17550}+\irrep{19305}+\irrep{34398}+\irrep{314496}+\irrep{459459}$\\
$\irrep{2430}\times\irrep{351}$ & = & $\irrep{351}+\irrep{1728}+\irrep{7371}+\irrep{17550}+\irrep{19305}+\irrep{46332}+\irrep{51975}+\irrep{314496}+\irrep{393822}$\\
$\irrep{2430}\times\irrep{650}$ & = & $\irrep{650}+\irrep{2430}+\irrep{2925}+\irrep{5824}+\irrepbar{5824}+2(\irrep{34749})+\irrep{70070}+\irrep{105600}+\irrep{252252}+\irrepbar{252252}+\irrep{812175}$\\
\end{longtable}
\newpage
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:E7TensorProducts}\E7 Tensor Products}\\
\toprule
\endfirsthead
\caption[]{\E7 Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrep{56}\times\irrep{56}$ & = & $\irrep{1}+\irrep{133}+\irrep{1463}+\irrep{1539}$\\
$\irrep{133}\times\irrep{56}$ & = & $\irrep{56}+\irrep{912}+\irrep{6480}$\\
$\irrep{133}\times\irrep{133}$ & = & $\irrep{1}+\irrep{133}+\irrep{1539}+\irrep{7371}+\irrep{8645}$\\
$\irrep{912}\times\irrep{56}$ & = & $\irrep{133}+\irrep{1539}+\irrep{8645}+\irrep{40755}$\\
$\irrep{912}\times\irrep{133}$ & = & $\irrep{56}+\irrep{912}+\irrep{6480}+\irrep{27664}+\irrep{86184}$\\
$\irrep{912}\times\irrep{912}$ & = & $\irrep{1}+\irrep{133}+\irrep{1463}+\irrep{1539}+\irrep{7371}+\irrep{8645}+\irrep{40755}+\irrep{152152}+\irrep{253935}+\irrep{365750}$\\
$\irrep{1463}\times\irrep{56}$ & = & $\irrep{56}+\irrep{6480}+\irrep{24320}+\irrep{51072}$\\
$\irrep{1463}\times\irrep{133}$ & = & $\irrep{1463}+\irrep{1539}+\irrep{40755}+\irrep{150822}$\\
$\irrep{1463}\times\irrep{912}$ & = & $\irrep{912}+\irrep{6480}+\irrep{27664}+\irrep{51072}+\irrep{362880}+\irrep{885248}$\\
$\irrep{1463}\times\irrep{1463}$ & = & $\irrep{1}+\irrep{133}+\irrep{1463}+\irrep{1539}+\irrep{7371}+\irrep{150822}+\irrep{152152}+\irrep{293930}+\irrep{617253}+\irrep{915705}$\\
$\irrep{1539}\times\irrep{56}$ & = & $\irrep{56}+\irrep{912}+\irrep{6480}+\irrep{27664}+\irrep{51072}$\\
$\irrep{1539}\times\irrep{133}$ & = & $\irrep{133}+\irrep{1463}+\irrep{1539}+\irrep{8645}+\irrep{40755}+\irrep{152152}$\\
$\irrep{1539}\times\irrep{912}$ & = & $\irrep{56}+\irrep{912}+2(\irrep{6480})+\irrep{27664}+\irrep{51072}+\irrep{86184}+\irrep{362880}+\irrep{861840}$\\
$\irrep{1539}\times\irrep{1463}$ & = & $\irrep{133}+\irrep{1463}+\irrep{1539}+\irrep{8645}+\irrep{40755}+\irrep{150822}+\irrep{152152}+\irrep{915705}+\irrep{980343}$\\
$\irrep{1539}\times\irrep{1539}$ & = & $\irrep{1}+\irrep{133}+\irrep{1463}+2(\irrep{1539})+\irrep{7371}+\irrep{8645}+2(\irrep{40755})+\irrep{150822}+\irrep{152152}+\irrep{365750}+\irrep{617253}+\irrep{980343}$\\
\end{longtable}
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:E8TensorProducts}\E8 Tensor Products}\\
\toprule
\endfirsthead
\caption[]{\E8 Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrep{248}\times\irrep{248}$ & = & $\irrep{1}+\irrep{248}+\irrep{3875}+\irrep{27000}+\irrep{30380}$\\
$\irrep{3875}\times\irrep{248}$ & = & $\irrep{248}+\irrep{3875}+\irrep{30380}+\irrep{147250}+\irrep{779247}$\\
$\irrep{3875}\times\irrep{3875}$ & = & $\irrep{1}+\irrep{248}+\irrep{3875}+\irrep{27000}+\irrep{30380}+\irrep{147250}+\irrep{779247}+\irrep{2450240}+\irrep{4881384}+\irrep{6696000}$\\
\end{longtable}
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:F4TensorProducts}\F4 Tensor Products}\\
\toprule
\endfirsthead
\caption[]{\F4 Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrep{26}\times\irrep{26}$ & = & $\irrep{1}+\irrep{26}+\irrep{52}+\irrep{273}+\irrep{324}$\\
$\irrep{52}\times\irrep{26}$ & = & $\irrep{26}+\irrep{273}+\irrep{1053}$\\
$\irrep{52}\times\irrep{52}$ & = & $\irrep{1}+\irrep{52}+\irrep{324}+\irrep[1]{1053}+\irrep{1274}$\\
$\irrep{273}\times\irrep{26}$ & = & $\irrep{26}+\irrep{52}+\irrep{273}+\irrep{324}+\irrep{1053}+\irrep{1274}+\irrep{4096}$\\
$\irrep{273}\times\irrep{52}$ & = & $\irrep{26}+\irrep{273}+\irrep{324}+\irrep{1053}+\irrep{4096}+\irrep{8424}$\\
$\irrep{273}\times\irrep{273}$ & = & $\irrep{1}+\irrep{26}+\irrep{52}+2(\irrep{273})+2(\irrep{324})+2(\irrep{1053})+\irrep[1]{1053}+\irrep{1274}+\irrep{2652}+2(\irrep{4096})+\irrep{8424}+\irrep{10829}+\irrep{19278}+\irrep{19448}$\\
$\irrep{324}\times\irrep{26}$ & = & $\irrep{26}+\irrep{273}+\irrep{324}+\irrep{1053}+\irrep{2652}+\irrep{4096}$\\
$\irrep{324}\times\irrep{52}$ & = & $\irrep{52}+\irrep{273}+\irrep{324}+\irrep{1274}+\irrep{4096}+\irrep{10829}$\\
$\irrep{324}\times\irrep{273}$ & = & $\irrep{26}+\irrep{52}+2(\irrep{273})+\irrep{324}+2(\irrep{1053})+\irrep{1274}+\irrep{2652}+2(\irrep{4096})+\irrep{8424}+\irrep{10829}+\irrep{19278}+\irrep{34749}$\\
$\irrep{324}\times\irrep{324}$ & = & $\irrep{1}+\irrep{26}+\irrep{52}+\irrep{273}+2(\irrep{324})+\irrep{1053}+\irrep[1]{1053}+\irrep{1274}+\irrep{2652}+2(\irrep{4096})+\irrep{8424}+\irrep{10829}+\irrep{16302}+\irrep{19448}+\irrep{34749}$\\
$\irrep{1053}\times\irrep{26}$ & = & $\irrep{52}+\irrep{273}+\irrep{324}+\irrep{1053}+\irrep[1]{1053}+\irrep{1274}+\irrep{4096}+\irrep{8424}+\irrep{10829}$\\
$\irrep[1]{1053}\times\irrep{26}$ & = & $\irrep{1053}+\irrep{8424}+\irrep{17901}$\\
$\irrep{1053}\times\irrep{52}$ & = & $\irrep{26}+\irrep{273}+2(\irrep{1053})+\irrep{2652}+\irrep{4096}+\irrep{8424}+\irrep{17901}+\irrep{19278}$\\
$\irrep[1]{1053}\times\irrep{52}$ & = & $\irrep{52}+\irrep[1]{1053}+\irrep{1274}+\irrep{10829}+\irrep{12376}+\irrep{29172}$\\
$\irrep{1053}\times\irrep{273}$ & = & $\irrep{26}+\irrep{52}+2(\irrep{273})+2(\irrep{324})+2(\irrep{1053})+\irrep[1]{1053}+2(\irrep{1274})+\irrep{2652}+3(\irrep{4096})+2(\irrep{8424})+2(\irrep{10829})+\irrep{17901}+\irrep{19278}+\irrep{19448}+\irrep{29172}+\irrep{34749}+\irrep{106496}$\\
$\irrep[1]{1053}\times\irrep{273}$ & = & $\irrep{273}+\irrep{1053}+\irrep{4096}+\irrep{8424}+\irrep{10829}+\irrep{17901}+\irrep{19278}+\irrep{106496}+\irrep{119119}$\\
$\irrep{1053}\times\irrep{324}$ & = & $\irrep{26}+2(\irrep{273})+\irrep{324}+3(\irrep{1053})+\irrep{1274}+\irrep{2652}+3(\irrep{4096})+2(\irrep{8424})+\irrep{10829}+\irrep{17901}+2(\irrep{19278})+\irrep{19448}+\irrep{34749}+\irrep{76076}+\irrep{106496}$\\
$\irrep[1]{1053}\times\irrep{324}$ & = & $\irrep{324}+\irrep[1]{1053}+\irrep{1274}+\irrep{4096}+\irrep{8424}+\irrep{10829}+\irrep{19448}+\irrep{29172}+\irrep{106496}+\irrep[1]{160056}$\\
$\irrep{1053}\times\irrep{1053}$ & = & $\irrep{1}+\irrep{26}+2(\irrep{52})+2(\irrep{273})+3(\irrep{324})+2(\irrep{1053})+2(\irrep[1]{1053})+3(\irrep{1274})+\irrep{2652}+4(\irrep{4096})+3(\irrep{8424})+4(\irrep{10829})+\irrep{12376}+\irrep{16302}+\irrep{17901}+2(\irrep{19278})+2(\irrep{19448})+2(\irrep{29172})+2(\irrep{34749})+2(\irrep{106496})+\irrep{107406}+\irrep{119119}+\irrep{160056}+\irrep[1]{160056}$\\
$\irrep[1]{1053}\times\irrep{1053}$ & = & $\irrep{26}+\irrep{273}+2(\irrep{1053})+\irrep{2652}+\irrep{4096}+2(\irrep{8424})+2(\irrep{17901})+2(\irrep{19278})+\irrep{34749}+\irrep{76076}+\irrep{106496}+\irrep{107406}+\irrep{119119}+\irrep{184756}+\irrep{379848}$\\
$\irrep[1]{1053}\times\irrep[1]{1053}$ & = & $\irrep{1}+\irrep{52}+\irrep{324}+2(\irrep[1]{1053})+\irrep{1274}+\irrep{10829}+\irrep{12376}+\irrep{16302}+\irrep{19448}+2(\irrep{29172})+\irrep{100776}+\irrep{160056}+\irrep[1]{160056}+\irrep{226746}+\irrep{340119}$\\
$\irrep{1274}\times\irrep{26}$ & = & $\irrep{273}+\irrep{1053}+\irrep{4096}+\irrep{8424}+\irrep{19278}$\\
$\irrep{1274}\times\irrep{52}$ & = & $\irrep{52}+\irrep{324}+\irrep[1]{1053}+\irrep{1274}+\irrep{4096}+\irrep{10829}+\irrep{19448}+\irrep{29172}$\\
$\irrep{1274}\times\irrep{273}$ & = & $\irrep{26}+\irrep{273}+\irrep{324}+2(\irrep{1053})+\irrep{1274}+\irrep{2652}+2(\irrep{4096})+2(\irrep{8424})+\irrep{10829}+\irrep{17901}+\irrep{19278}+\irrep{19448}+\irrep{34749}+\irrep{106496}+\irrep{107406}$\\
$\irrep{1274}\times\irrep{324}$ & = & $\irrep{52}+\irrep{273}+\irrep{324}+\irrep{1053}+\irrep[1]{1053}+2(\irrep{1274})+2(\irrep{4096})+\irrep{8424}+2(\irrep{10829})+\irrep{19278}+\irrep{19448}+\irrep{29172}+\irrep{34749}+\irrep{106496}+\irrep{160056}$\\
$\irrep{1274}\times\irrep{1053}$ & = & $\irrep{26}+2(\irrep{273})+\irrep{324}+3(\irrep{1053})+2(\irrep{2652})+3(\irrep{4096})+3(\irrep{8424})+\irrep{10829}+2(\irrep{17901})+3(\irrep{19278})+\irrep{19448}+2(\irrep{34749})+\irrep{76076}+2(\irrep{106496})+\irrep{107406}+\irrep{119119}+\irrep{205751}+\irrep{379848}$\\
$\irrep{1274}\times\irrep[1]{1053}$ & = & $\irrep{52}+\irrep{324}+\irrep[1]{1053}+2(\irrep{1274})+\irrep{4096}+2(\irrep{10829})+\irrep{12376}+\irrep{19448}+2(\irrep{29172})+\irrep{34749}+\irrep{106496}+\irrep{160056}+\irrep[1]{160056}+\irrep{340119}+\irrep{420147}$\\
$\irrep{1274}\times\irrep{1274}$ & = & $\irrep{1}+\irrep{52}+\irrep{273}+2(\irrep{324})+2(\irrep[1]{1053})+2(\irrep{1274})+\irrep{2652}+2(\irrep{4096})+\irrep{8424}+3(\irrep{10829})+\irrep{12376}+\irrep{16302}+\irrep{19278}+2(\irrep{19448})+2(\irrep{29172})+\irrep{34749}+2(\irrep{106496})+\irrep{160056}+\irrep[1]{160056}+\irrep{205751}+\irrep{226746}+\irrep{420147}$\\
$\irrep{2652}\times\irrep{26}$ & = & $\irrep{324}+\irrep{2652}+\irrep{4096}+\irrep{10829}+\irrep{16302}+\irrep{34749}$\\
$\irrep{2652}\times\irrep{52}$ & = & $\irrep{1053}+\irrep{2652}+\irrep{4096}+\irrep{19278}+\irrep{34749}+\irrep{76076}$\\
$\irrep{2652}\times\irrep{273}$ & = & $\irrep{273}+\irrep{324}+\irrep{1053}+\irrep{1274}+\irrep{2652}+2(\irrep{4096})+\irrep{8424}+2(\irrep{10829})+\irrep{16302}+\irrep{19278}+\irrep{19448}+2(\irrep{34749})+\irrep{76076}+\irrep{106496}+\irrep{160056}+\irrep{212992}$\\
$\irrep{2652}\times\irrep{324}$ & = & $\irrep{26}+\irrep{273}+\irrep{324}+\irrep{1053}+2(\irrep{2652})+2(\irrep{4096})+\irrep{8424}+\irrep{10829}+\irrep{16302}+\irrep{17901}+\irrep{19278}+\irrep{19448}+2(\irrep{34749})+\irrep{76076}+\irrep{81081}+\irrep{106496}+\irrep{205751}+\irrep{212992}$\\
$\irrep{2652}\times\irrep{1053}$ & = & $\irrep{52}+\irrep{273}+\irrep{324}+\irrep{1053}+\irrep[1]{1053}+2(\irrep{1274})+\irrep{2652}+3(\irrep{4096})+2(\irrep{8424})+3(\irrep{10829})+\irrep{16302}+2(\irrep{19278})+2(\irrep{19448})+\irrep{29172}+3(\irrep{34749})+\irrep{76076}+2(\irrep{106496})+\irrep{107406}+2(\irrep{160056})+\irrep[1]{160056}+\irrep{205751}+\irrep{212992}+\irrep{412776}+\irrep{787644}$\\
$\irrep{2652}\times\irrep[1]{1053}$ & = & $\irrep{1053}+\irrep{2652}+\irrep{4096}+\irrep{8424}+\irrep{17901}+2(\irrep{19278})+\irrep{19448}+\irrep{34749}+\irrep{76076}+\irrep{106496}+\irrep{107406}+\irrep{205751}+\irrep{379848}+\irrep{787644}+\irrep{1002456}$\\
$\irrep{2652}\times\irrep{1274}$ & = & $\irrep{273}+2(\irrep{1053})+\irrep{1274}+\irrep{2652}+2(\irrep{4096})+2(\irrep{8424})+\irrep{10829}+\irrep{17901}+3(\irrep{19278})+\irrep{19448}+2(\irrep{34749})+2(\irrep{76076})+2(\irrep{106496})+\irrep{107406}+\irrep{160056}+\irrep{205751}+\irrep{212992}+\irrep{379848}+\irrep{787644}+\irrep{952952}$\\
$\irrep{2652}\times\irrep{2652}$ & = & $\irrep{1}+\irrep{26}+\irrep{52}+\irrep{273}+2(\irrep{324})+\irrep{1053}+\irrep[1]{1053}+\irrep{1274}+2(\irrep{2652})+2(\irrep{4096})+\irrep{8424}+2(\irrep{10829})+\irrep{12376}+2(\irrep{16302})+\irrep{17901}+\irrep{19278}+2(\irrep{19448})+\irrep{29172}+3(\irrep{34749})+\irrep{76076}+\irrep{81081}+2(\irrep{106496})+\irrep{107406}+\irrep{119119}+\irrep{160056}+\irrep[1]{160056}+2(\irrep{205751})+2(\irrep{212992})+\irrep{342056}+\irrep{412776}+\irrep{420147}+\irrep{629356}+\irrep{787644}+\irrep{1042899}+\irrep{1341522}$\\
\end{longtable}
\newpage
{\setlength\extrarowheight{1.1pt}
\enlargethispage{15pt}
\begin{longtable}{rcp{0.8\textwidth}}
\caption{\label{tab:G2TensorProducts}\G2 Tensor Products}\\
\toprule
\endfirsthead
\caption[]{\G2 Tensor Products (continued)}\\
\endhead
\bottomrule
\endlastfoot
$\irrep{7}\times\irrep{7}$ & = & $\irrep{1}+\irrep{7}+\irrep{14}+\irrep{27}$\\
$\irrep{14}\times\irrep{7}$ & = & $\irrep{7}+\irrep{27}+\irrep{64}$\\
$\irrep{14}\times\irrep{14}$ & = & $\irrep{1}+\irrep{14}+\irrep{27}+\irrep{77}+\irrep[1]{77}$\\
$\irrep{27}\times\irrep{7}$ & = & $\irrep{7}+\irrep{14}+\irrep{27}+\irrep{64}+\irrep{77}$\\
$\irrep{27}\times\irrep{14}$ & = & $\irrep{7}+\irrep{14}+\irrep{27}+\irrep{64}+\irrep{77}+\irrep{189}$\\
$\irrep{27}\times\irrep{27}$ & = & $\irrep{1}+\irrep{7}+\irrep{14}+2(\irrep{27})+2(\irrep{64})+\irrep{77}+\irrep[1]{77}+\irrep{182}+\irrep{189}$\\
$\irrep{64}\times\irrep{7}$ & = & $\irrep{14}+\irrep{27}+\irrep{64}+\irrep{77}+\irrep[1]{77}+\irrep{189}$\\
$\irrep{64}\times\irrep{14}$ & = & $\irrep{7}+\irrep{27}+2(\irrep{64})+\irrep{77}+\irrep{182}+\irrep{189}+\irrep{286}$\\
$\irrep{64}\times\irrep{27}$ & = & $\irrep{7}+\irrep{14}+2(\irrep{27})+2(\irrep{64})+2(\irrep{77})+\irrep[1]{77}+\irrep{182}+2(\irrep{189})+\irrep{286}+\irrep{448}$\\
$\irrep{64}\times\irrep{64}$ & = & $\irrep{1}+\irrep{7}+2(\irrep{14})+2(\irrep{27})+2(\irrep{64})+3(\irrep{77})+2(\irrep[1]{77})+2(\irrep{182})+3(\irrep{189})+\irrep{273}+\irrep{286}+\irrep{378}+2(\irrep{448})+\irrep{729}$\\
$\irrep[1]{77}\times\irrep{7}$ & = & $\irrep{64}+\irrep{189}+\irrep{286}$\\
$\irrep{77}\times\irrep{7}$ & = & $\irrep{27}+\irrep{64}+\irrep{77}+\irrep{182}+\irrep{189}$\\
$\irrep[1]{77}\times\irrep{14}$ & = & $\irrep{14}+\irrep{77}+\irrep[1]{77}+\irrep{189}+\irrep{273}+\irrep{448}$\\
$\irrep{77}\times\irrep{14}$ & = & $\irrep{14}+\irrep{27}+\irrep{64}+\irrep{77}+\irrep[1]{77}+\irrep{182}+\irrep{189}+\irrep{448}$\\
$\irrep[1]{77}\times\irrep{27}$ & = & $\irrep{27}+\irrep{64}+\irrep{77}+\irrep[1]{77}+\irrep{182}+\irrep{189}+\irrep{286}+\irrep{448}+\irrep{729}$\\
$\irrep{77}\times\irrep{27}$ & = & $\irrep{7}+\irrep{14}+\irrep{27}+2(\irrep{64})+2(\irrep{77})+\irrep[1]{77}+\irrep{182}+2(\irrep{189})+\irrep{286}+\irrep{378}+\irrep{448}$\\
$\irrep[1]{77}\times\irrep{64}$ & = & $\irrep{7}+\irrep{27}+2(\irrep{64})+\irrep{77}+2(\irrep{182})+2(\irrep{189})+2(\irrep{286})+\irrep{378}+\irrep{448}+\irrep{729}+\irrep{896}+\irrep{924}$\\
$\irrep{77}\times\irrep{64}$ & = & $\irrep{7}+\irrep{14}+2(\irrep{27})+3(\irrep{64})+2(\irrep{77})+\irrep[1]{77}+2(\irrep{182})+3(\irrep{189})+2(\irrep{286})+\irrep{378}+2(\irrep{448})+\irrep{729}+\irrep{924}$\\
$\irrep[1]{77}\times\irrep[1]{77}$ & = & $\irrep{1}+\irrep{14}+\irrep{27}+\irrep{77}+2(\irrep[1]{77})+\irrep{182}+\irrep{189}+\irrep{273}+\irrep{378}+2(\irrep{448})+\irrep{714}+\irrep{729}+\irrep{748}+\irrep{1547}$\\
$\irrep[1]{77}\times\irrep{77}$ & = & $\irrep{14}+\irrep{27}+\irrep{64}+2(\irrep{77})+\irrep[1]{77}+\irrep{182}+2(\irrep{189})+\irrep{273}+\irrep{286}+\irrep{378}+2(\irrep{448})+\irrep{729}+\irrep{924}+\irrep{1547}$\\
$\irrep{77}\times\irrep{77}$ & = & $\irrep{1}+\irrep{7}+\irrep{14}+2(\irrep{27})+2(\irrep{64})+2(\irrep{77})+2(\irrep[1]{77})+2(\irrep{182})+3(\irrep{189})+\irrep{273}+2(\irrep{286})+\irrep{378}+2(\irrep{448})+\irrep{714}+\irrep{729}+\irrep{924}$\\
$\irrep{182}\times\irrep{7}$ & = & $\irrep{77}+\irrep{182}+\irrep{189}+\irrep{378}+\irrep{448}$\\
$\irrep{182}\times\irrep{14}$ & = & $\irrep{64}+\irrep{77}+\irrep{182}+\irrep{189}+\irrep{286}+\irrep{378}+\irrep{448}+\irrep{924}$\\
$\irrep{182}\times\irrep{27}$ & = & $\irrep{27}+\irrep{64}+\irrep{77}+\irrep[1]{77}+2(\irrep{182})+2(\irrep{189})+\irrep{286}+\irrep{378}+2(\irrep{448})+\irrep{714}+\irrep{729}+\irrep{924}$\\
$\irrep{182}\times\irrep{64}$ & = & $\irrep{14}+\irrep{27}+2(\irrep{64})+2(\irrep{77})+2(\irrep[1]{77})+2(\irrep{182})+3(\irrep{189})+\irrep{273}+2(\irrep{286})+2(\irrep{378})+3(\irrep{448})+\irrep{714}+2(\irrep{729})+2(\irrep{924})+\irrep{1547}+\irrep{1728}$\\
$\irrep{182}\times\irrep[1]{77}$ & = & $\irrep{27}+2(\irrep{64})+\irrep{77}+\irrep[1]{77}+2(\irrep{182})+2(\irrep{189})+2(\irrep{286})+\irrep{378}+2(\irrep{448})+\irrep{714}+2(\irrep{729})+\irrep{896}+2(\irrep{924})+\irrep{1547}+\irrep{1728}+\irrep{2926}$\\
$\irrep{182}\times\irrep{77}$ & = & $\irrep{7}+\irrep{14}+\irrep{27}+2(\irrep{64})+2(\irrep{77})+\irrep[1]{77}+2(\irrep{182})+3(\irrep{189})+\irrep{273}+3(\irrep{286})+2(\irrep{378})+3(\irrep{448})+\irrep{714}+2(\irrep{729})+\irrep{896}+2(\irrep{924})+\irrep{1254}+\irrep{1547}+\irrep{1728}$\\
$\irrep{182}\times\irrep{182}$ & = & $\irrep{1}+\irrep{7}+\irrep{14}+2(\irrep{27})+2(\irrep{64})+2(\irrep{77})+2(\irrep[1]{77})+3(\irrep{182})+3(\irrep{189})+2(\irrep{273})+3(\irrep{286})+2(\irrep{378})+4(\irrep{448})+2(\irrep{714})+4(\irrep{729})+\irrep{748}+2(\irrep{896})+3(\irrep{924})+\irrep{1254}+2(\irrep{1547})+2(\irrep{1728})+\irrep{2079}+\irrep[1]{2079}+\irrep{2926}+\irrep{3003}$\\
$\irrep{189}\times\irrep{7}$ & = & $\irrep{64}+\irrep{77}+\irrep[1]{77}+\irrep{182}+\irrep{189}+\irrep{286}+\irrep{448}$\\
$\irrep{189}\times\irrep{14}$ & = & $\irrep{27}+\irrep{64}+\irrep{77}+\irrep[1]{77}+\irrep{182}+2(\irrep{189})+\irrep{286}+\irrep{378}+\irrep{448}+\irrep{729}$\\
$\irrep{189}\times\irrep{27}$ & = & $\irrep{14}+\irrep{27}+2(\irrep{64})+2(\irrep{77})+\irrep[1]{77}+2(\irrep{182})+3(\irrep{189})+\irrep{273}+2(\irrep{286})+\irrep{378}+2(\irrep{448})+\irrep{729}+\irrep{924}$\\
$\irrep{189}\times\irrep{64}$ & = & $\irrep{7}+\irrep{14}+2(\irrep{27})+3(\irrep{64})+3(\irrep{77})+2(\irrep[1]{77})+3(\irrep{182})+4(\irrep{189})+\irrep{273}+3(\irrep{286})+2(\irrep{378})+4(\irrep{448})+\irrep{714}+2(\irrep{729})+\irrep{896}+2(\irrep{924})+\irrep{1547}$\\
$\irrep{189}\times\irrep[1]{77}$ & = & $\irrep{7}+\irrep{14}+\irrep{27}+2(\irrep{64})+2(\irrep{77})+\irrep[1]{77}+2(\irrep{182})+3(\irrep{189})+\irrep{273}+2(\irrep{286})+2(\irrep{378})+3(\irrep{448})+\irrep{714}+2(\irrep{729})+\irrep{896}+2(\irrep{924})+\irrep{1547}+\irrep{1728}+\irrep{2079}$\\
$\irrep{189}\times\irrep{77}$ & = & $\irrep{7}+\irrep{14}+2(\irrep{27})+3(\irrep{64})+3(\irrep{77})+2(\irrep[1]{77})+3(\irrep{182})+4(\irrep{189})+\irrep{273}+3(\irrep{286})+2(\irrep{378})+4(\irrep{448})+\irrep{714}+3(\irrep{729})+\irrep{896}+2(\irrep{924})+\irrep{1547}+\irrep{1728}$\\
$\irrep{189}\times\irrep{182}$ & = & $\irrep{7}+\irrep{14}+2(\irrep{27})+3(\irrep{64})+3(\irrep{77})+2(\irrep[1]{77})+3(\irrep{182})+5(\irrep{189})+2(\irrep{273})+4(\irrep{286})+3(\irrep{378})+5(\irrep{448})+2(\irrep{714})+4(\irrep{729})+2(\irrep{896})+4(\irrep{924})+\irrep{1254}+3(\irrep{1547})+2(\irrep{1728})+\irrep{2079}+\irrep{2926}+\irrep{3003}$\\
$\irrep{189}\times\irrep{189}$ & = & $\irrep{1}+\irrep{7}+2(\irrep{14})+3(\irrep{27})+4(\irrep{64})+4(\irrep{77})+3(\irrep[1]{77})+5(\irrep{182})+6(\irrep{189})+2(\irrep{273})+5(\irrep{286})+4(\irrep{378})+6(\irrep{448})+2(\irrep{714})+5(\irrep{729})+\irrep{748}+2(\irrep{896})+5(\irrep{924})+\irrep{1254}+3(\irrep{1547})+2(\irrep{1728})+\irrep{2079}+\irrep{2926}$\\
\end{longtable}
}
\section{Theoretical Background and Implementation}
\label{sec:TheoryAndImplementation}
In this section we give a self-contained overview of the Lie algebra theory used
and implemented in LieART. It is subdivided into parts discussing basic
properties of Lie algebras, roots, Weyl orbits, representations and decompositions.
Every subsection begins with a list of the
relevant LieART functions followed by text that introduces the necessary theory
with reference to the functions and notes on their implementation. This section
is not intended as a pedagogical introduction to Lie algebras and we refer the
reader to the excellent literature serving this purpose \cite{Slansky,
Georgi:1982jb, cahn1984semi}.
\subsection{Algebras}
\label{ssec:Algebras}
\definition{
\com{Rank[\args{expr}]} & gives the rank of the algebra of \args{expr}, which can be an irrep, a weight, a root or an algebra itself.\\
\com{Algebra[\args{algebraClass}][\args{rank}]} & represents a classical algebra of the type \args{algebraClass}, which can only be \com{A}, \com{B}, \com{C} or \com{D}, with rank \args{rank}.\\
\com{Algebra[\args{expr}]} & gives the algebra (classical or exceptional) of \args{expr}, which may be an irrep, a weight or a root in any basis.\\
\com{OrthogonalSimpleRoots[\args{algebra}]} & gives the simple roots of \args{algebra} in the orthogonal basis.\\
\com{CartanMatrix[\args{algebra}]} & gives the Cartan matrix of \args{algebra}.\\
\com{OmegaMatrix[\args{algebra}]} & gives the matrix of fundamental weights of \args{algebra} as rows.\\
\com{OrthogonalFundamentalWeights[\args{algebra}]} & gives the fundamental weights of \args{algebra} in the orthogonal basis.\\
\com{OrthogonalBasis[\args{expr}]} & transforms \args{expr} from any basis into the orthogonal basis.\\
\com{OmegaBasis[\args{expr}]} & transforms \args{expr} from any basis into the $\omega$-basis. \\
\com{AlphaBasis[\args{expr}]} & transforms \args{expr} from any basis into the $\alpha$-basis.\\
\com{DMatrix[\args{algebra}]} & gives a matrix with inverse length factors of simple roots on the main diagonal.\\
\com{ScalarProduct[\args{weight1},\args{weight2}]} & gives the scalar product of \args{weight1} and \args{weight2} in any basis. \args{weight1} and \args{weight2} may be weights or roots.\\
\com{MetricTensor[\args{algebra}]} & gives the metric tensor or quadratic form matrix of \args{algebra}.\\
}{Basic Algebra Properties.}
\begin{figure}[t]
\begin{center}
\includegraphics{DynkinDiagrams-crop.pdf}
\caption{\label{fig:DynkinDiagrams} Dynkin Diagrams of classical and exceptional simple Lie algebras.}
\end{center}
\end{figure}
\newcommand{\liebracket}[2]{\left[#1, #2\right]}
\subsubsection{Definition}
A \emph{Lie Algebra} is a vector space $g$ over a field $F$ with the \emph{Lie
bracket} $\left[\cdot,\cdot\right]$ as binary operation, which is bilinear,
alternating and fulfills the Jacobi identity. The Lie bracket is often referred
to as the commutator. The Lie brackets of the generators $t_i$ of the Lie
algebra are
\begin{equation}\label{eq:DefinitionStructureConstants}
\liebracket{t_i}{t_j} = f_{ijk} t_k
\end{equation}
with the so called \emph{structure constants} $f_{ijk}$, that fully determine
the algebra. A Lie algebra is called \emph{simple} when it contains no
non-trivial ideals. A \emph{semi-simple} Lie algebra is a direct sum of simple ones.
\subsubsection{Roots}
The generators $t_i$ of a simple Lie algebra in the Cartan-Weyl basis fall into
two sets: The so-called \emph{Cartan subalgebra}, $H$, contains all
simultaneously diagonalizable generators $h_i$, i.e., the generators are
Hermitian and mutually commute (the Cartan subalgebra is abelian):
\begin{equation}
h_i=h_i^\dagger, \qquad\liebracket{h_i}{h_j} = 0, \qquad i,j=1,\ldots,n.
\end{equation}
The number of simultaneously diagonalizable generators $n$ is called the
\emph{rank} of the algebra, and can be determined by the function
\com{Rank[\args{expr}]} in LieART. We denote all other generators as $e_\alpha$.
They satisfy $n$ eigenvalue equations with the generators of the Cartan
subalgebra $h_i$:
\begin{equation}\label{eq:DefinitionRootVector}
\liebracket{h_i}{e_\alpha} = \alpha_i\,e_\alpha
\end{equation}
which is a subset of \eqref{eq:DefinitionStructureConstants} and thus the
$\alpha_i$ are structure constants, which are real numbers due to the
hermiticity of the $h_i$'s. Since the $\alpha_i$ are the solutions to the
eigenvalue equation \eqref{eq:DefinitionRootVector} the vectors
$\alpha{=}(\alpha_1,\ldots,\alpha_n)$ are called the \emph{root vectors}, which
lie in an $n$-dimensional Euclidean space, called the \emph{root space}.
\emph{Roots} are functionals mapping the Cartan subalgebra $H$ onto the real
numbers (the eigenvalues), for all generators $t_i$, which also includes the
$h_i$ where the eigenvalues are zero. Thus, a Lie algebra has as many roots as
generators. The roots are labeled by the root vectors, which we will use in its
place from now on.
A zero root with an $n$-fold degeneracy is associated with the Cartan
subalgebra. In the Cartan-Weyl basis the other generators come in conjugated
pairs $e_\alpha^\dagger{=}e_{{-}\alpha}$ and correspond to the ladder operators
of \SU2. So-called \emph{positive roots} correspond to the raising operator
$e_\alpha$ and negative roots to the lowering operators $e_{{-}\alpha}$. If
$\alpha$ is a root so is ${-}\alpha$.
Some of the positive roots can be written as sum of others. Those for which this
is not possible are called \emph{simple roots} and a Lie algebra has as many
simple roots as its rank. It is clear that specifying the simple roots fully
determines a Lie algebra and thus can be used to replace
\eqref{eq:DefinitionStructureConstants}, because all structure constants can be
derived therefrom.
\subsubsection{Classification of Lie Algebras}
Using the commutation relations and the Jacobi identity to analyze the
generators, constraints on the roots can be derived and eventually all possible
root systems found, which is identical to identifying all allowed Lie algebras.
It turns out that simple roots can only come in at most two lengths in one Lie
algebra and at four different angles between any pair of them. The simple roots
are in particular not orthogonal. The so-called \emph{Dynkin diagrams} are an
ingenious way to depict these relations: simple roots are represented by dots,
which are open, {\Large $\Circle$}, for the longer roots or for all roots if
they only come in one length, and filled, {\Large $\CIRCLE$}, for the shorter
roots. Angles between two simple roots are represented by lines connecting the
dots: no line for an angle of 90\textdegree, one line for 120\textdegree, two
lines for 135\textdegree\ and three for 150\textdegree. Figure
\ref{fig:DynkinDiagrams} shows the Dynkin diagrams for all simple Lie algebras.
Semi-simple Lie algebras have disjoint parts and can thus be reduced to two or more
Dynkin diagrams of simple Lie algebras.
The simple Lie algebras fall into two types: four families of infinite series
algebras, \A{n}, \B{n}, \C{n} and \D{n}, also called the \emph{classical Lie
algebras} and five so-called \emph{exceptional algebras}, \E6, \E7, \E8, \F4 and
\G2, with their rank as subscript (see Table
\ref{tab:LieAlgebrasClassification}).
\begin{table}[t]
\begin{center}
\begin{tabular}{lllll}\toprule
\textbf{Type} & \textbf{Cartan} & \textbf{Name} & \textbf{Rank} & \textbf{Description}\\\midrule
classical & \A{n} & \SU{n{+}1} & $n\geq1$ & Special unitary algebras of $n{+}1$ complex dimension\\
& \B{n} & \SO{2n{+}1} & $n\geq3$ & Special orthogonal algebras of odd ($2n{+}1$) real dimension\\
& \C{n} & \Sp{2n} & $n\geq2$ & Symplectic algebras of even ($2n$) complex dimension\\
& \D{n} & \SO{2n} & $n\geq4$ & Special orthogonal algebras of even ($2n$) real dimension\\\midrule
exceptional & \E6 & \E6 & 6 & Exceptional algebra of rank 6\\
& \E7 & \E7 & 7 & Exceptional algebra of rank 7\\
& \E8 & \E8 & 8 & Exceptional algebra of rank 8\\
& \F4 & \F4 & 4 & Exceptional algebra of rank 4\\
& \G2 & \G2 & 2 & Exceptional algebra of rank 2\\
\bottomrule
\end{tabular}
\caption{\label{tab:LieAlgebrasClassification} Classification of simple Lie algebras.}
\end{center}
\end{table}
The labels are according to the classification by Cartan. The classical Lie
algebras are internally represented (i.e., in \com{FullForm}) in LieART by
\com{Algebra[\args{algebraClass}][\args{n}]}, with \args{algebraClass} being
either \com{A}, \com{B}, \com{C} or \com{D} and \args{n} being the rank. In
\com{StandardForm} the Cartan classification is explicitly displayed and in
\com{TraditionalForm} it is written by its conventional name.
\subsubsection{Bases}
\label{ssec:Bases}
With respect to the Weyl reflection group, inherent in all compact Lie algebras, as
we will explain later, it is convenient to express the root space in an
orthogonal coordinate system, which is a subspace of Euclidean space. The
specific subspace varies with the Lie algebra. For \A{n} it is a subspace of
$\mathbb{R}^{n+1}$, where the coordinates sum to zero. As the simple roots define the Lie
algebra, they are explicitly specified in LieART using orthogonal coordinates
and can be retrieved by \com{OrthogonalSimpleRoots[\args{algebra}]}. E.g., the
four simple roots of \A4 (\SU5) in orthogonal coordinates are: \begin{mathin}
OrthogonalSimpleRoots[A4]//Column
\end{mathin}
\begin{mathout}
\nohangingindent
\rootorthogonal{1, {-}1, 0, 0, 0}\newline
\rootorthogonal{0, 1, {-}1, 0, 0}\newline
\rootorthogonal{0, 0, 1, {-}1, 0}\newline
\rootorthogonal{0, 0, 0, 1, {-}1}
\end{mathout}
The so-called \emph{Cartan matrix} exhibits the non-orthogonality of the simple
roots. It is defined as
\begin{equation}\label{eq:CartanMatrix}
A_{ij} = \frac{2 \scalarproduct{\alpha_i}{\alpha_j}}{\scalarproduct{\alpha_j}{\alpha_j}}\qquad i,j=1,\ldots,n
\end{equation}
where the scalar product $\scalarproduct{\cdot}{\cdot}$ is the ordinary scalar
product of $\mathbb{R}^{n+1}$ in the case of \A{n}. Most textbooks translate the
Dynkin diagrams to the corresponding Cartan matrix as a starting point. And in
fact, the rows of the Cartan matrix are the simple roots in the so-called
\emph{$\omega$-basis}, which is the basis of \emph{fundamental weights}, also
called the \emph{Dynkin basis}. (Weights will be introduced later in the context
of representations.) The Cartan matrix is implemented in LieART as the function
\com{CartanMatrix[\args{algebra}]} following the definition of
\eqref{eq:CartanMatrix}. The Cartan matrix for \A4 reads:
\begin{mathin}
CartanMatrix[A4]
\end{mathin}
\begin{mathout}
$\begin{pmatrix}
2 & {-}1 & 0 & 0 \\
{-}1 & 2 & {-}1 & 0 \\
0 & {-}1 & 2 & {-}1 \\
0 & 0 & {-}1 & 2 \\
\end{pmatrix}$
\end{mathout}
Besides the orthogonal basis, and the $\omega$-basis, the $\alpha$-basis is also
useful. As the name indicates it is the basis of simple roots and it explicitly
shows how, e.g., a root is composed out of simple roots. Neither the
$\omega$-basis nor the $\alpha$-basis is orthogonal. The Cartan matrix
mediates between the $\omega$- and $\alpha$-bases:
\begin{equation}
\alpha_i
= \sum_{j=1}^n A_{ij}\omega_j, \qquad \omega_i
= \sum_{j=1}^n (A^{{-}1})_{ij}\alpha_j.
\end{equation}
where the $\omega_i$ are the fundamental weights, which we will define later.
These bases are dual to each other in the sense that
\begin{equation}\label{eq:AlphaOmegaDual}
\frac{2\scalarproduct{\alpha_i}{\omega_j}}{\scalarproduct{\alpha_i}{\alpha_i}}
\equiv \scalarproduct{\alpha_i^\vee}{\omega_j}
= \delta_{ij},\qquad i,j=1,\ldots,n
\end{equation}
where $\alpha_i^\vee$ is the so-called \emph{coroot} of $\alpha_i$ defined as
\begin{equation}
\alpha^\vee = \frac{2\alpha}{\scalarproduct{\alpha}{\alpha}}.
\end{equation}
The transformation to the orthogonal basis can be derived from
\eqref{eq:AlphaOmegaDual}: Expressing $\alpha_i$ and $\omega_j$ in orthogonal
coordinates as $\hat\alpha_i$ and $\hat\omega_j$, \eqref{eq:AlphaOmegaDual} reads
\begin{equation}\label{eq:AlphaOmegaDualOrthogonal}
\frac{2 \hat\alpha_i\cdot\hat\omega_j}{\hat\alpha_i\cdot\hat\alpha_i}
\equiv \hat\alpha_i^\vee\cdot\hat\omega_j
= \delta_{ij},\qquad i,j=1,\ldots,n
\end{equation}
using the ordinary scalar product of $\mathbb{R}^m$, where $m$ is the dimension
of the orthogonal subspace. Using the matrices $\matrixhat{A}$ and
$\matrixhat\Omega$ with the simple \emph{co}roots $\hat\alpha_i^\vee$ and the
fundamental weights $\hat\omega_j$ as rows, we can write
\eqref{eq:AlphaOmegaDualOrthogonal} as the matrix equation:
\begin{equation}\label{eq:AlphaOmegaDualOrthogonalMatrices}
\matrixhat{A}\transpose{\matrixhat{\Omega}} = I_n
\end{equation}
where both $\matrixhat{A}$ and $\matrixhat\Omega$ are $n{\times}m$ matrices.
Please note that the dimension of the orthogonal space $m$ is not necessarily
the same as the rank of the algebra $n$. These exceptions are: \A{n} with
$m{=}n+1$, \E6 with $m{=}8$, \E7 with $m{=}8$ and \G2 with $m{=}3$. For all others
$m{=}n$ holds. The matrix of the simple coroots in the orthogonal basis
$\matrixhat{A}$ is easily calculated from the simple roots given in LieART, but
the matrix of fundamental weights in the orthogonal basis $\matrixhat{\Omega}$
must be determined by \eqref{eq:AlphaOmegaDualOrthogonalMatrices}. In the cases
where $\matrixhat{A}$ is not a square matrix its inverse does not exist. Because
the rows of $\matrixhat{A}$, which are the simple coroots in the orthogonal
basis, are linearly independent, $\matrixhat{A}\transpose{\matrixhat{A}}$ is
invertible and the so-called right-inverse $\matrixhat{A}^{\!+}$ can be found via
\begin{equation}
\matrixhat{A}^{\!+}
= \transpose{\matrixhat{A}}(\matrixhat{A}\transpose{\matrixhat{A}})^{{-}1}
\end{equation}
which satisfies: $\matrixhat{A}\matrixhat{A}^{\!+}{=}I_n$, i.e., the
matrix $\transpose{\matrixhat{\Omega}}$ can be identified with
$\matrixhat{A}^{\!+}\!$, in other words the fundamental weights as rows of
$\matrixhat{\Omega}$ in terms of simple coroots as rows of $\matrixhat{A}$ are
\begin{equation}\label{eq:FundamentalWeights}
\matrixhat{\Omega} = \transpose{(\matrixhat{A}^{\!+}\!)}
= (\matrixhat{A}\transpose{\matrixhat{A}})^{{-}1}\matrixhat{A}
\end{equation}
The Mathematica built-in function \com{PseudoInverse[\args{matrix}]} yields the
right-inverse for our case of a \args{matrix} with linearly independent rows, i.e.,
the implementation of the second equality in \eqref{eq:FundamentalWeights} is not
needed. The matrix of the fundamental weights $\matrixhat{\Omega}$ is implemented as
\com{OmegaMatrix[\args{algebra}]}, e.g., for \A4:
\pagebreak
\begin{mathin}
OmegaMatrix[A4]
\end{mathin}
\begin{mathout}
\setlength\extrarowheight{2pt}
$\begin{pmatrix}
\frac{4}{5} & {-}\frac{1}{5} & {-}\frac{1}{5} & {-}\frac{1}{5} & {-}\frac{1}{5} \\
\frac{3}{5} & \frac{3}{5} & {-}\frac{2}{5} & {-}\frac{2}{5} & {-}\frac{2}{5} \\
\frac{2}{5} & \frac{2}{5} & \frac{2}{5} & {-}\frac{3}{5} & {-}\frac{3}{5} \\
\frac{1}{5} & \frac{1}{5} & \frac{1}{5} & \frac{1}{5} & {-}\frac{4}{5}
\end{pmatrix}$
\end{mathout}
and the function \com{OrthogonalFundamentalWeights[\args{algebra}]} adds the proper heads
to the rows of $\matrixhat{\Omega}$, to identify them as weights in the orthogonal basis.
We will discuss (fundamental) weights in Section \ref{ssec:Representations} in more detail.
The matrix of the fundamental weights in the orthogonal basis $\matrixhat\Omega$
mediates between the $\omega$-basis and the orthogonal basis:
\begin{equation}
\omega_i = \sum_{j=1}^n \matrixhat\Omega_{ij}e_j, \qquad e_i
= \sum_{j=1}^n (\matrixhat\Omega^{{-}1})_{ij}\omega_j.
\end{equation}
The LieART functions \com{AlphaBasis[\args{weightOrRoot}]},
\com{OmegaBasis[\args{weightOrRoot}]} and \linebreak
\com{OrthogonalBasis[\args{weightOrRoot}]} transform \args{weightOrRoot} from
any basis into the $\alpha$-basis, the $\omega$-basis and the
orthogonal basis, respectively. It is obvious how the simple roots in the
$\alpha$-basis look:
\begin{mathin}
AlphaBasis[OrthogonalSimpleRoots[A4]]//Column
\end{mathin}
\begin{mathout}
\nohangingindent
\rootorthogonal{1, 0, 0, 0}\newline
\rootorthogonal{0, 1, 0, 0}\newline
\rootorthogonal{0, 0, 1, 0}\newline
\rootorthogonal{0, 0, 0, 1}
\end{mathout}
and likewise the fundamental weights in the $\omega$-basis:
\begin{mathin}
OmegaBasis[OrthogonalFundamentalWeights[A4]]//Column
\end{mathin}
\begin{mathout}
\label{out:FundamentalWeightsOmegaBasis}
\nohangingindent
\weight{1, 0, 0, 0}\newline
\weight{0, 1, 0, 0}\newline
\weight{0, 0, 1, 0}\newline
\weight{0, 0, 0, 1}
\end{mathout}
A root in LieART is represented by three different heads: \com{RootOrthogonal[\args{algebraClass}][\args{label}]} for a
root in the orthogonal basis, \com{RootOmega[\args{algebraClass}][\args{label}]}
in the $\omega$-basis and in the $\alpha$-basis by \com{RootAlpha[\args{algebraClass}][\args{label}]}. The \args{algebraClass} can only be \com{A}, \com{B},
\com{C} or \com{D} to indicate a classical Lie algebra or \com{E6}, \com{E7},
\com{E8}, \com{F4} or \com{G2} for the exceptionals. The \args{label} stands for
the comma-separated coordinates. This form of the roots is displayed in
\com{InputForm} and \com{FullForm}. E.g., the first simple root of \A4 in all
three bases reads:
\begin{mathin}
\{\slot,OmegaBasis[\slot],AlphaBasis[\slot]\}\&@First[OrthogonalSimpleRoots[A4]]//InputForm
\end{mathin}
\begin{mathout}
\{RootOrthogonal[A][1,{-}1,0,0,0], RootOmega[A][2,{-}1,0,0], RootAlpha[A][1,0,0,0]\}
\end{mathout}
\subsubsection{Scalar Product}
The standard choice for the length factors $\scalarproduct{\alpha_j}{\alpha_j}$
in \eqref{eq:CartanMatrix} is 2 for the longer roots, if there are two root
lengths. The factors $2/\scalarproduct{\alpha_j}{\alpha_j}$ can only take three
different values which are: 1 for all roots of \A{n}, \D{n}, \E6, \E7, \E8 and
for the long roots of \B{n}, \C{n}, \F4 and \G2; 2 for the short roots of \B{n},
\C{n} and \F4 and 3 for the short root of \G2. Their implementation in LieART
is in the form of diagonal matrices with the inverse factors for the simple roots
corresponding to the row on the main diagonal, i.e.,
\begin{equation}
D=\text{diag}\left(\frac{1}{2}\scalarproduct{\alpha_1}{\alpha_1},\ldots, \frac{1}{2}\scalarproduct{\alpha_n}{\alpha_n}\right)
\end{equation}
as defined in \cite{klimyk_orbit_2006}. E.g., for \F4, to avoid a trivial
example, we have:
\pagebreak
\begin{mathin}
DMatrix[F4]
\end{mathin}
\begin{mathout}
$\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & \frac{1}{2} & 0 \\
0 & 0 & 0 & \frac{1}{2} \\
\end{pmatrix}$
\end{mathout}
In the $\omega$-basis the scalar product used in \eqref{eq:CartanMatrix} becomes:
\begin{equation}
\scalarproduct{x}{y} = \sum_{i,j=1}^n x_i (A^{{-}1})_{ij}D_j y_j = \sum_{i,j=1}^n x_i G_{ij} y_j
\end{equation}
where the $x_i$ and $y_j$ are coordinates of $x$ and $y$ in the $\omega$-basis. The matrix
\begin{equation}
G_{ij} = (A^{{-}1})_{ij} \frac{\scalarproduct{\alpha_j}{\alpha_j}}{2} = (A^{{-}1})_{ij} D_j
\end{equation}
is called \emph{quadratic form matrix} or \emph{metric tensor} of the Lie
algebra. The scalar product is available in LieART as
\com{ScalarProduct[\args{weightOrRoot1},\args{weightOrRoot2}]}, where \args{weightOrRoot1} and
\args{weightOrRoot2} may be roots or weights in the orthogonal basis, the
$\alpha$-basis or the $\omega$-basis. The function recognizes the basis by the heads
of \args{weightOrRoot1} and \args{weightOrRoot2}. The LieART function for the metric tensor $G$ is
\com{MetricTensor[\args{algebra}]}, e.g., for \A4:
\begin{mathin}
MetricTensor[A4]
\end{mathin}
\begin{mathout}
\setlength\extrarowheight{2pt}
$\begin{pmatrix}
\frac{4}{5} & \frac{3}{5} & \frac{2}{5} & \frac{1}{5} \\
\frac{3}{5} & \frac{6}{5} & \frac{4}{5} & \frac{2}{5} \\
\frac{2}{5} & \frac{4}{5} & \frac{6}{5} & \frac{3}{5} \\
\frac{1}{5} & \frac{2}{5} & \frac{3}{5} & \frac{4}{5} \\
\end{pmatrix}$
\end{mathout}
\subsubsection{Representation}
\label{ssec:Representations}
A \emph{representation} is a linear map of the Lie algebra into the general
linear group, i.e., the matrix group, that preserves the Lie bracket relations.
It is a homomorphism that maps the generators $t_i$ onto invertible matrices
$T_i$, that satisfy the same ``commutation'' relations as the Lie algebra,
namely
\begin{equation}\label{eq:DefinitionRepresentation}
\liebracket{T_i}{T_j} = f_{ijk} T_k,
\end{equation}
where the $\left[\cdot,\cdot\right]$ is now the commutator.
Points in the vector space that the matrices act on can be labeled by the set of
eigenvalues of the matrices representing the generators of the Cartan
subalgebra. Such a set of eigenvalues is called a \emph{weight vector}, and the
associated functional \emph{weight}, denoted by $\lambda$. They are defined in
root space which is called \emph{weight space} in this context. The weights and
weight vectors of a representation correspond to roots and root vectors of the
algebra. In fact, weights can be expressed as rational linear combinations of
roots, and, as pointed out in this section, eventually by simple roots. In
particular, the structure constants themselves form a representation of the
algebra: the \emph{adjoint representation}, which has the same dimension as the
algebra, namely the number of roots.
\pagebreak
\subsection{Weyl Group Orbits}
\definition{
\com{Reflect[\args{weightOrRoot},\args{simpleroots}]} & reflects \args{weightOrRoot} at the hyperplanes orthogonal to the specified \args{simpleroots}.\\
\com{Reflect[\args{weightOrRoot}]} & reflects \args{weightOrRoot} at the hyperplanes orthogonal to all simple roots of the Lie algebra of \args{weightOrRoot}.\\
\com{ReflectionMatrices[\args{algebra}]} & gives the reflection matrices of the Weyl group of \args{algebra}.\\
\com{Orbit[\args{weightOrRoot},\args{simpleroots}]} & generates the Weyl group orbit of \args{weightOrRoot} using only the specified \args{simpleroots}.\\
\com{Orbit[\args{weightOrRoot}]} & generates the full Weyl group orbit of \args{weightOrRoot} using all simple roots of the Lie algebra of \args{weightOrRoot}.\\
\com{DimOrbit[\args{weightOrRoot},\args{simpleroots}]} & gives the size of the orbit of \args{weightOrRoot} using only the \args{simpleroots}.\\
\com{DimOrbit[\args{weightOrRoot}]} & gives the size of the orbit of \args{weightOrRoot} using all simple roots of the Lie algebra of \args{weightOrRoot}.\\
}{Weyl Group Orbits}
The finite group $W(L)$, called the Weyl group of the Lie algebra $L$, is a
reflection group inherent in the root systems of all simple Lie algebras. The
Coxeter groups are an abstraction of reflection groups and the so-called
\emph{Coxeter-Dynkin diagram} describing Coxeter groups are closely related to
the Dynkin diagrams presented here. In fact the Coxeter-Dynkin diagram
corresponding to the Dynkin diagram describes the Weyl group of the Lie algebra.
The transformations $r_i$ generating the Weyl group are reflections of a vector $x$
in root space at the hyperplanes orthogonal to the simple roots $\alpha_i$
of the Lie algebra defined by
\begin{equation}\label{eq:WeylReflection}
r_i x = x - \frac{2\scalarproduct{x}{\alpha_i}}{\scalarproduct{\alpha_i}{\alpha_i}}\alpha_i,\qquad i=1,\ldots,n,\qquad x\in \mathbb{R}^n.
\end{equation}
The LieART function \com{Reflect[\args{weightOrRoot},\args{simpleroots}]}
implements the reflections $r_i$ with \args{weightOrRoot} as $x$ and
\args{simpleroots} as a list of simple roots $\alpha_i$. The result is a list of
weights, because the reflection is performed with several roots simultaneously.
If \args{weightOrRoot} is in the orthogonal basis and ought to be reflected
using all roots, the function pattern is \com{Reflect[\args{weightOrRoot}]},
without the simple roots as second argument. Instead of the definition with
scalar products following \eqref{eq:WeylReflection}, the implementation
multiplies the orthogonal coordinates with precomputed reflection matrices,
which have a simple form in the orthogonal basis. The function computing the
reflection matrices is \com{ReflectionMatrices[\args{algebra}]} and simply
applies the built-in Mathematica command \com{ReflectionMatrix} to all simple
roots and saves the result as \com{DownValues} of \linebreak \com{ReflectionMatrices[\args{algebra}]}.
E.g., the reflection matrices for \A4 (in the 5-dimensional orthogonal basis) are:
\begin{mathin}
Row[MatrixForm /@ ReflectionMatrices[A4]]
\end{mathin}
\begin{mathout}
$\begin{pmatrix}
0 & 1 & 0 & 0 & 0 \\
1 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 1 \\
\end{pmatrix}
\begin{pmatrix}
1 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 1 \\
\end{pmatrix}
\begin{pmatrix}
1 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 \\
0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 \\
\end{pmatrix}
\begin{pmatrix}
1 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 \\
0 & 0 & 0 & 1 & 0 \\
\end{pmatrix}$
\end{mathout}
The Weyl group of \A{n} is particularly simple in the orthogonal basis: It is
the symmetric group $S_{n+1}$. The reflection matrices for \A4 above represent
the generators of $S_5$, i.e., the coordinate permutations (12), (23), (34) and
(45), respectively.
Acting on a vector $x$ in root space by all elements of the Weyl group gives a
set of points, of which some may coincide. The subset of distinct points is
called the \emph{orbit} of $x$ and denoted as $O(x)$. The LieART function
\com{Orbit[\args{weightOrRoot},\args{simpleroots}]} gives the orbit of
\args{weightOrRoot} using the \args{simpleroots}. If the second argument is
omitted, all simple roots of the algebra associated with \args{weightOrRoot} are
used. The function applies \com{Reflect} in a nested fashion and removes
duplicate points in every step. The orbit of an \A{n} root or weight is
constructed in a special way for performance reasons: The \args{weightOrRoot} is
transformed to the orthogonal basis and the other points of its orbit are
constructed by permuting its coordinates using the built-in Mathematica function
\com{Permutations}. For example, the orbit of the first simple root of \A4 is
\begin{mathin}
Orbit[First[OrthogonalSimpleRoots[A4]]]
\end{mathin}
\begin{mathout}
\label{out:A4RootOrbit}
\{%
\rootorthogonal{{-}1, 0, 0, 0, 1},%
\rootorthogonal{{-}1, 0, 0, 1, 0},%
\rootorthogonal{{-}1, 0, 1, 0, 0},%
\rootorthogonal{{-}1, 1, 0, 0, 0},%
\rootorthogonal{0, {-}1, 0, 0, 1},\newline
\rootorthogonal{0, {-}1, 0, 1, 0},%
\rootorthogonal{0, {-}1, 1, 0, 0},%
\rootorthogonal{0, 0, {-}1, 0, 1},%
\rootorthogonal{0, 0, {-}1, 1, 0},%
\rootorthogonal{0, 0, 0, {-}1, 1},\newline
\rootorthogonal{0, 0, 0, 1, {-}1},%
\rootorthogonal{0, 0, 1, {-}1, 0},%
\rootorthogonal{0, 0, 1, 0, {-}1},%
\rootorthogonal{0, 1, {-}1, 0, 0},%
\rootorthogonal{0, 1, 0, {-}1, 0},\newline
\rootorthogonal{0, 1, 0, 0, {-}1},%
\rootorthogonal{1, {-}1, 0, 0, 0},%
\rootorthogonal{1, 0, {-}1, 0, 0},%
\rootorthogonal{1, 0, 0, {-}1, 0},%
\rootorthogonal{1, 0, 0, 0, {-}1}%
\}
\end{mathout}
which is in fact the \A4 root system without the zero roots.
With the same set of Weyl group generators, defined by the roots used, every
vector is uniquely associated with only one orbit. In turn every element of an
orbit allows us to generate the entire orbit by reflecting at the hyperplanes
defined by the roots. The hyperplanes divide the space into so-called \emph{Weyl
chambers}. An orbit has no more than one distinct element in every chamber and the Weyl
group permutes the chambers. The so-called \emph{dominant chamber} has elements
with only non-negative coordinates in the $\omega$-basis, which serve as
definite representatives for the orbits associated with them. The test function
\com{DominantQ[\args{weightOrRoot}]} gives \com{True} if \args{weightOrRoot} is
in the dominant chamber and \com{False} otherwise. The dominant root of
\outref{out:A4RootOrbit} in the $\omega$-basis is
\begin{mathin}
OmegaBasis[Select[\%, DominantQ]]
\end{mathin}
\begin{mathout}
$\left\{\rootomega{1, 0, 0 ,1}\right\}$
\end{mathout}
(Roots and weights in the $\omega$-basis are displayed with framed boxes
following the notation of most textbooks.) If an orbit is created by LieART it
is saved as a \com{DownValue} of \com{Orbit} associated with its dominant root
or weight. Whenever an orbit of a non-dominant weight or root is needed, LieART
first seeks the \com{DownValue}s of \com{Orbit} for the weight or root, to see
if the orbit has already been generated. Reusing computed orbits saves CPU time
especially for Lie algebras other than \A{n} and the described procedure avoids
saving the same orbit multiple times as \com{DownValue} involving different
roots or weights.
The size of the orbit, i.e., its number of elements, denoted by
$\left|O(x)\right|$, is implemented as the function
\com{DimOrbit[\args{weightOrRoot},\args{simpleroots}]} or
\com{DimOrbit[\args{weightOrRoot}]} if all simple roots of the associated Lie
algebra should be used. The size of the orbit in \outref{out:A4RootOrbit} is
\begin{mathin}
DimOrbit[First[OrthogonalSimpleRoots[A4]]]
\end{mathin}
\begin{mathout}
20
\end{mathout}
\subsection{Roots}
\definition{
\com{RootSystem[\args{algebra}]} & Root system of \args{algebra}\\
\com{ZeroRoots[\args{algebra}]} & Zero roots associated with the Cartan subalgebra of \args{algebra}\\
\com{Height[\args{root}]} & Height of a \args{root} within the root system\\
\com{HighestRoot[\args{algebra}]} & Highest root of the root system of \args{algebra}\\
\com{PositiveRootQ[\args{root}]} & Gives \com{True} if \args{root} is a positive root\\
\com{NumberOfPositiveRoots[\args{algebra}]} & Number of positive roots of \args{algebra}\\
\com{PositiveRoots[\args{algebra}]} & Gives only the positive roots of \args{algebra}\\
}{Roots}
The roots of a Lie algebra can be built from the simple roots. There are two
traditional approaches: (1)~building the roots from linear combinations of
simple roots. Since not all linear combinations of simple roots are roots, the
difficulty lies in filtering out combinations that are roots. (2)~Starting from
a \emph{highest root} the roots can be constructed by subtracting simple roots.
LieART uses yet another approach: It builds the orbits of the simple roots by
applying the Weyl group of the Lie algebra and adds the $n$-fold degenerate zero
roots corresponding to the Cartan subalgebra. The simple roots of the same
length belong to the same orbit, e.g., for \A{n} there is only one orbit besides
the zero orbit (see \outref{out:A4RootOrbit}). Nevertheless, the orbits of all
simple roots are generated and then united. The fact that non-zero roots are
non-degenerate allows us to remove duplicate roots obtained by the described
procedure.
The function \com{RootSystem[\args{algebra}]} constructs the root system by the
procedure described above. As a non-trivial example we demonstrate the
procedure on \G2, which has two non-trivial orbits and the zero orbit: The two simple roots of \G2
\begin{mathin}
OmegaBasis[OrthogonalSimpleRoots[G2]]
\end{mathin}
\begin{mathout}
$\left\{\rootomega{2, {-}1},\rootomega{{-}3, 2}\right\}$
\end{mathout}
have different lengths:
\begin{mathin}
DMatrix[G2]
\end{mathin}
\begin{mathout}
$\begin{pmatrix}
\frac{1}{3} & 0 \\
0 & 1 \\
\end{pmatrix}$
\end{mathout}
Generating the Weyl group orbits of each of the simple roots
\begin{mathin}
Orbit /@ OmegaBasis[OrthogonalSimpleRoots[G2]]
\end{mathin}
\begin{mathout}
$\begin{pmatrix}
\rootomega{-2, 1} & \rootomega{-1, 0} & \rootomega{-1, 1} & \rootomega{1, -1} & \rootomega{1, 0} & \rootomega{2, -1}\\
\rootomega{-3, 1} & \rootomega{-3, 2} & \rootomega{0, -1} & \rootomega{0, 1} & \rootomega{3, -2} & \rootomega{3, -1}\\
\end{pmatrix}$
\end{mathout}
and adding the twofold degenerate zero roots constructed by \com{ZeroRoots[\args{algebra}]}
\begin{mathin}
ZeroRoots[G2]
\end{mathin}
\begin{mathout}
$\left\{\rootomega{0, 0},\rootomega{0, 0}\right\}$
\end{mathout}
yields the full \G2 root system, displayed in spindle shape
\begin{mathin}
RootSystem[G2, SpindleShape -> True]
\end{mathin}
\begin{mathout}
\begin{minipage}{0.8in}
\begin{center}
\rootomega{0, 1}\linebreak
\rootomega{3, {-}1}\linebreak
\rootomega{1, 0}\linebreak
\rootomega{{-}1, 1}\linebreak
\rootomega{{-}3, 2}%
\rootomega{2, {-}1}\linebreak
\rootomega{0, 0}%
\rootomega{0, 0}\linebreak
\rootomega{{-}2, 1}%
\rootomega{3, {-}2}\linebreak
\rootomega{1, {-}1}\linebreak
\rootomega{{-}1, 0}\linebreak
\rootomega{{-}3, 1}\linebreak
\rootomega{0, {-}1}
\end{center}
\end{minipage}
\end{mathout}
where a row stands for the same height of the roots. The \emph{height} of a root
is defined as the sum of coefficients in its linear combination of simple roots,
i.e., the sum of coordinates in the $\alpha$-basis. It is implemented by
\com{Height[\args{root}]}. The \emph{highest root} has the largest height,
implemented in LieART as \com{HighestRoot[\args{algebra}]}, which simply returns
the first root of the root system, since the latter is sorted by the height of
the root decreasingly. E.g. for \G2:
\begin{mathin}
HighestRoot[G2]
\end{mathin}
\begin{mathout}
\rootomega{0, 1}
\end{mathout}
The \emph{positive roots} are the roots that are non-negative linear
combinations of simple roots, i.e., the coordinates in the $\alpha$-basis are
all non-negative, with at least one being non-zero, to exclude the zero roots. The
function \com{PositiveRootQ[\args{root}]} tests if \args{root} is positive. The
\args{root} may be in any basis and will be transformed into the $\alpha$-basis,
where its coordinates are tested accordingly. The number of positive roots is
explicitly stated as \com{NumberOfPositiveRoots[\args{algebra}]} in LieART. It
serves as a limiter to the nested reflections for the generation of Weyl group
orbits. There is a theorem stating that the maximum number of reflections
building an element of the Weyl group is equal to the number of positive roots of
the corresponding Lie algebra.
Since the root system is sorted by height, the positive roots come first.
\com{PositiveRoots[\args{algebra}]} extracts only those with the use of
\com{NumberOfPositiveRoots[\args{algebra}]}. E.g., for \G2:
\begin{mathin}
PositiveRoots[G2]
\end{mathin}
\begin{mathout}
$\left\{\rootomega{0, 1},\rootomega{3, -1},\rootomega{1, 0},\rootomega{-1, 1},\rootomega{-3, 2},\rootomega{2, -1}\right\}$
\end{mathout}
\subsection{Representations}
\definition{
\com{WeightOrthogonal[\args{algebraClass}][\args{label}]} & Weight in the orthogonal basis defined by its algebra \args{algebraClass} and Dynkin \args{label}\\
\com{WeightAlpha[\args{algebraClass}][\args{label}]} & Weight in the $\alpha$-basis defined by its algebra \args{algebraClass} and Dynkin \args{label}\\
\com{Weight[\args{algebraClass}][\args{label}]} & Weight in the $\omega$-basis defined by its algebra \args{algebraClass} and Dynkin \args{label}\\
\com{Irrep[\args{algebraClass}][\args{label}]} & Irrep described by its algebra \args{algebraClass} and Dynkin \args{label}\\
\com{WeightLevel[\args{weight},\args{irrep}]} & Level of the \args{weight} within the \args{irrep}\\
\com{Height[\args{irrep}]} & Height of \args{irrep}\\
\com{SingleDominantWeightSystem[\args{irrep}]} & Dominant weights of \args{irrep} without their multiplicities\\
\com{WeightMultiplicity[\args{weight},\args{irrep}]} & Computes the multiplicity of \args{weight} within \args{irrep}\\
\com{DominantWeightSystem[\args{irrep}]} & Dominant weights of \args{irrep} with their multiplicities\\
\com{WeightSystem[\args{irrep}]} & Full weight system of \args{irrep}\\
\com{Irrep[\args{algebra}][\args{dimname}]} & Irrep entered by its \args{algebra} and \args{dimname}\\
\com{ProductIrrep[\args{irreps}]} & Head of product \args{irreps}\\
\com{Delta[\args{algebra}]} & Half the sum of positive roots of \args{algebra} ($\delta{=}\dynkincomma{1,1,\ldots}$)\\
\com{WeylDimensionFormula[\args{algebra}]} & Explicit Weyl dimension formula for \args{algebra}\\
\com{Dim[\args{irrep}]} & Dimension of \args{irrep}\\
\com{DimName[\args{irrep}]} & Dimensional name of \args{irrep}\\
\com{Index[\args{irrep}]} & Index of \args{irrep}\\
\com{CongruencyClass[\args{irrep}]} & Congruency class number of \args{irrep}\\
}{Basic Properties of Irreps}
As explained in Section \ref{ssec:Algebras} a \emph{representation} is a set of
matrices that satisfies the same commutation relations as the algebra. Each of
the matrices can be labeled by the \emph{weight vector} with the eigenvalues of
the matrices corresponding to the generators of the Cartan subalgebra, and we
will refer to the weight vector simply as \emph{weight}. The weight vector has
the dimension of the Cartan subalgebra, i.e., the rank of the algebra, and not
the dimension of the space the matrices act on. The latter depends on the
particular representation.
The weights $\lambda$ can be written as linear combination of simple roots and a
crucial theorem states that the so-called \emph{Dynkin labels} $a_i$ defined as
\begin{equation}
a_i = \frac{2\scalarproduct{\lambda}{\alpha_i}}{\scalarproduct{\alpha_i}{\alpha_i}},\qquad i=1,\ldots,n
\end{equation}
are integers for all simple roots $\alpha_i$. (Please note that this is also
true if $\lambda$ is replaced by any simple root, since this constitutes an
element of the Cartan matrix as defined in \eqref{eq:CartanMatrix}.) The Dynkin
labels are in particular used to label weights (and roots). The smallest
non-zero weights with $a_i\geq 0$ are called the \emph{fundamental weights}
$\omega_i$. They define the $\omega$-basis or Dynkin basis already introduced.
They are implemented in LieART as
\com{OrthogonalFundamentalWeights[\args{algebra}]} in the orthogonal basis and
we have given an example for \A4 in \outref{out:FundamentalWeightsOmegaBasis}.
The Dynkin labels $a_i$ of a weight $\lambda$ are the coefficients of its linear
combination of fundamental weights, i.e., the $a_i$ are the coordinates in the
$\omega$-basis, which can be displayed as a row vector with comma separated
entries or as a framed box following the convention of some textbooks:
\begin{equation}
\lambda = \sum_{i=1}^n a_i\omega_i
= \dynkincomma{a_1, a_2, \ldots, a_n}
= \weight{a_1, a_2, \ldots, a_n}.
\end{equation}
A weight in LieART is represented by three different heads, depending on its
basis, in analogy with the roots: \com{WeightOrthogonal[\args{algebraClass}][\args{label}]}
for a weight in the orthogonal basis, in the $\alpha$-basis
\com{WeightAlpha[\args{algebraClass}][\args{label}]} and
simply \com{Weight[\args{algebraClass}][\args{label}]} in the $\omega$-basis,
where we omit the explicit ``\com{Omega}'' for brevity, because the
$\omega$-basis is the natural basis for weights. (The same can be said for the
$\alpha$-basis for roots, favoring the shorter head \com{Root} instead of
\com{RootAlpha} in the $\alpha$-basis. Unfortunately this would clash with the
built-in Mathematica function \com{Root[\args{f},\args{k}]} representing the
\args{k}th root of a polynomial equation defined by $f[x]=0$.) The
\args{algebraClass} can only be \com{A}, \com{B}, \com{C} or \com{D} to indicate
a classical Lie algebra or \com{E6}, \com{E7}, \com{E8}, \com{F4} or \com{G2}
for the exceptionals. The \args{label} stands for the comma-separated
coordinates. This form of the weight is displayed in \com{InputForm} and
\com{FullForm}. E.g., the first fundamental weight of \A4 in all three bases
reads:
\begin{mathin}
\{\slot,AlphaBasis[\slot],OmegaBasis[\slot]\}\&\newline
@First[OrthogonalFundamentalWeights[A4]]//InputForm
\end{mathin}
\begin{mathout}
\{WeightOrthogonal[A][4/5,{-}1/5,{-}1/5,{-}1/5,{-}1/5], WeightAlpha[A][4/5,3/5,2/5,1/5], Weight[A][1,0,0,0]\}
\end{mathout}
Since weights are linear combinations of roots, many properties of roots
translate to weights. The Weyl group also applies to weights and weight space is
also divided into Weyl chambers. A weight with only non-negative coordinates lies in
the dominant Weyl chamber and is called a \emph{dominant weight}. In analogy
with the highest root, every irreducible representation (irrep) has a
non-degenerate \emph{highest weight}, denoted as $\Lambda$, which is also a
dominant weight, but not necessarily the only dominant weight of the irrep. The
weight system of the irrep can be computed from the highest weight $\Lambda$ by
subtracting simple roots. Thus, a highest weight $\Lambda$ uniquely defines the
irrep, and since a particular Lie algebra has infinitely many irreps, it serves
as a label for the irrep itself using the same denotation, $\Lambda$.
In LieART an irrep is represented by
\com{Irrep[\args{algebraClass}][\args{label}]}, where \args{algebraClass}
defines the Lie algebra class in the same manner as for weights and roots, and
\args{label} is the comma-separated label of the highest weight of the irrep.
E.g., the 10-dimensional irrep of \A4 has the highest weight
\dynkincomma{0,1,0,0} and thus the irrep can be entered as
\com{Irrep[A][0,1,0,0]}.
The so-called \emph{Dynkin label} of an irrep is similar to the notation of a
weight, but since the highest weight has only positive label entries the commas
between them can be omitted, as long as this is unambiguous. The Dynkin label
in LieART is displayed in the \com{StandardForm}, e.g., the \irrep{10} of \A4:
\begin{mathin}
Irrep[A][0,1,0,0]//StandardForm
\end{mathin}
\begin{mathout}
\dynkin{0,1,0,0}
\end{mathout}
If at least one of the entries in the Dynkin labels has more than a single
digit, all entries are separated by commas to avoid ambiguities, which is the
standard textbook convention:
\begin{mathin}
Irrep[A][0, 10, 3, 1] // StandardForm
\end{mathin}
\begin{mathout}
\dynkincomma{0,10,3,1}
\end{mathout}
\subsubsection{Weight System}
The conventional approach to computing all weights of an irrep is to subtract
simple roots from the highest weight $\Lambda$ that defines the irrep. The Dynkin
label of the highest weight $\Lambda=\dynkincomma{a_1, a_2, \ldots, a_n}$
reveals how many times each simple root can be subtracted: the $i$th root can be
subtracted $a_i$ times. The \emph{level} of a weight is the number of simple
roots that need to be subtracted from the highest weight to obtain it. A weight
may be obtained by different subtraction routes, but it always involves the same
number of simple roots, thus its level is unique. As explained earlier, the
$\alpha$-basis exhibits the coefficients of the linear combination of simple
roots, which are rational numbers in general. The difference between these
coefficients of the weight and the highest weight shows how many times each
simple root has been subtracted from the latter. The sum over these
differences, for each simple root, gives the level of the weight:
\begin{equation}
L(\lambda, \Lambda) = \sum_{i=1}^n \left(\bar{\Lambda}_i-\bar{\lambda}_i\right)
\end{equation}
where $\lambda=\dynkincomma{\bar{\lambda}_1,\ldots,\bar{\lambda}_n}$ and
$\Lambda=\dynkincomma{\bar{\Lambda}_1,\ldots,\bar{\Lambda}_n}$ is the weight and
highest weight in the $\alpha$-basis, respectively. The LieART function
\com{WeightLevel[\args{weight},\args{irrep}]} implements this procedure. The
highest level of an irrep is called its \emph{height}. Please note that the
highest weight has the lowest level, which is zero. The weight with the highest
level has the coefficients of the highest weight in the $\alpha$-basis, with
negative sign and rearranged (if the irrep is complex), i.e., the sum over them
is the negative of the sum of the highest weight. Thus, the height of an irrep
with highest weight $\Lambda$ is
\begin{equation}
T(\Lambda) = 2 \sum_{i=1}^n \bar{\Lambda}_i,
\end{equation}
which is available in LieART as \com{Height[\args{irrep}]}.
The algorithm used in LieART is an implementation of the scheme developed in \cite{Moody:1982} to compute the
weight system. It deviates from the traditional procedure described above for
performance reasons. Some weights of an irrep may be degenerate and the
procedure with subtracting simple roots only yields an upper limit of this
degeneracy, which is the number of subtraction routes that lead to a weight. To
compute the \emph{multiplicity} $m_\lambda$ of a weight $\lambda$ of the irrep
with highest weight $\Lambda$, the so-called \emph{Freudenthal recursion formula}
is usually used:
\begin{equation}\label{eq:FreudenthalFormula}
2\sum_{\alpha\in\Delta^{\!+}\!}\,\sum_{k\geq0}\scalarproduct{\lambda{+}k\alpha}{\alpha}m_{\lambda+k\alpha}
= \left[\scalarproduct{\Lambda{+}\delta}{\Lambda{+}\delta}-\scalarproduct{\lambda{+}\delta}{\lambda{+}\delta}\right]m_\lambda
\end{equation}
where $\Delta^{\!+}$ denotes the positive root system,
$\delta{=}\dynkincomma{1,1,\ldots,1}$ is half the sum of all positive roots in
the $\omega$-basis and $m_{\lambda+k\alpha}$ is the already computed
multiplicity of a weight $\lambda+k\alpha$ that is higher than $\lambda$. The
sum over $k$ is finite because the weight $\lambda+k\alpha$ must be a member of
the weight system of the irrep under consideration.
The recursive nature of the Freudenthal formula makes the computer computation of weight
multiplicities the most CPU time consuming procedure in the
determination of the weight system of an irrep. The algorithm developed in
\cite{Moody:1982} exploits the Weyl group in both the weight system and the
root system. The weight system of an irrep is a collection of Weyl group orbits,
represented by one dominant weight. The multiplicity of the dominant weight is
the same for all weights of the associated orbit. Thus, a weight system can be
constructed by (1) determining the dominant weights of the irrep, (2) computing
their multiplicity and (3) generating the orbits of the dominant weights with the
same multiplicity by application of the Weyl group of the associated algebra.
In LieART the function \com{SingleDominantWeightSystem[\args{irrep}]} determines
the dominant weights of \args{irrep} by successively subtracting positive roots
starting from the highest weight and keeping only the dominant weight of the
result in every step. This process terminates, because there are smallest
dominant weights, i.e., the fundamental weights, constituting a lower boundary.
E.g., the \irrep{40} of \A4 has two distinct dominant weights:
\begin{mathin}
SingleDominantWeightSystem[Irrep[A][0, 0, 1, 1]]
\end{mathin}
\begin{mathout}\label{out:SingleDominantWeights}
\{\weight{0, 0, 1, 1},\weight{0, 1, 0, 0}\}
\end{mathout}
Thus, an improved version of the Freudenthal formula considers only dominant
weights $\lambda$. The second exploitable property is the existence of a
stabilizer of the weight, a subgroup of its Weyl group $W$ that fixes the
weight:
\begin{equation}
\text{Stab}_W(\lambda) = W_T := \left\{w\in W\,|\: w\lambda=\lambda\right\}.
\end{equation}
The stabilizer $W_T$ reduces the number of independent scalar products and
previously computed multiplicities, because for $w\in W_T$,
$\scalarproduct{\lambda{+}k\alpha_i}{\alpha_i}{=}\scalarproduct{w(\lambda{+}
k\alpha_i)}{w\alpha_i}{=}\scalarproduct{\lambda{+}kw\alpha_i}{w\alpha_i}$ and
$m_{\lambda+k\alpha_i}{=}m_{w(\lambda+k\alpha_i)}{=}m_{\lambda+kw\alpha_i}$.
Since the elements of the Weyl group $W$ are reflections at simple roots, the
stabilizer group is defined by the reflections at simple roots that map $\lambda$
onto itself. Because of \eqref{eq:WeylReflection} this is the case, when
$\scalarproduct{\lambda}{\alpha_i}=0$. If $\lambda$ is expressed in the
$\omega$-basis as $\lambda{=}\sum n_i\omega_i$ the scalar product with the $i$th
simple root is zero, when $n_i=0$. Let $T$ be a set of these indices, i.e.,
$T{=}\left\{i\,|\: n_i{=}0\right\}$, and let $\Delta_T$ be the root system based
on the simple roots labeled by $T$.
The group $\hat{W}_T$, which is generated by $W_T$ and the negative
identity element $w{=}{-}1$ as $\hat{W}_T{=}\left<W_T,{-}1\right>$, decomposes the root
system into orbits $o_1,\ldots,o_r$, defined by $\hat{W}_T\alpha_i$. Each
orbit has a unique representative $\xi_i$ in the positive roots
($\xi_i{\in}\Delta^{\!+}$). The $\xi_i$'s are those positive roots, that have
non-zero coefficients in the $\omega$-basis at the positions, where $\lambda$
has zeros, i.e., $\xi_i{=}\sum m_i\omega_i$ with $m_i{\geq}0$ for $i{\in}T$.
The computation of the multiplicity $m_\lambda$ of the dominant weight $\lambda$
is then accomplished by the \emph{modified Freudenthal formula}:
\begin{equation}
\sum_{i=1}^r \left|o_i\right|\sum_{k=1}^\infty\scalarproduct{\lambda{+}k\xi_i}{\xi_i}m_{\lambda+k\xi_i}
= \left[\scalarproduct{\Lambda{+}\delta}{\Lambda{+}\delta}-\scalarproduct{\lambda{+}\delta}{\lambda{+}\delta}\right]m_\lambda
\end{equation}
where $\left|o_i\right|$ are the sizes of the orbits. It is important to note
that these sizes are $\left|o_i\right|{=}\left|W_T\xi_i\right|$ if
$\xi_i{\in}\Delta_T$ and $\left|o_i\right|{=}2\left|W_T\xi_i\right|$ if
$\xi_i{\notin}\Delta_T$, because in the former case the negative roots are
included in $W_T\xi_i$, i.e., the $-1$, while only positive roots are in the
orbit $W_T\xi_i$ if $\xi_i{\notin}\Delta_T$, requiring a factor of 2 for the
same reason as on the left-hand side of \eqref{eq:FreudenthalFormula}. It is
$\xi_i{\in}\Delta_T$ if $\scalarproduct{\lambda}{\xi_i}{=}0$.
The higher weight $\lambda+k\xi_i$ is not necessarily a dominant weight, but can
always be reflected to the dominant chamber to obtain the corresponding
multiplicity that is already computed.
The computation of weight multiplicities is implemented in LieART as
\com{WeightMultiplicity[\args{weight},\args{irrep}]} following the above
algorithm, using several helper functions: \com{T[\args{weight}]} gives the
set $T$, the positions of zeros of the coefficients of \args{weight} in the
$\omega$-basis, \com{Xis[\args{algebra},\args{t}]} determines the $\xi$'s based
on the set $T$ which should be supplied via the argument \args{t},
\com{Alphas[\args{algebra},\args{t}]} gives $\alpha_i$ with $i\in T$ to
construct the orbit $W_T\xi_i$. \com{XisAndMul[\args{algebra},\args{t}]} yields
a list of the $\xi_i$'s together with their associated orbit size
$\left|o_i\right|$. Since the possible subsets $T$ of zeros in the weight
coefficients for a specific algebra are limited, we follow the suggestion of
\cite{Moody:1982} and save this in list form as \com{XisAndMul} for reuse
upon first evaluation in the course of a calculation. Saved values of
\com{XisAndMul} can be retrieved by \com{Definition[XisAndMul]}.
Take for example the dominant weight \weight{0, 1, 0, 0} of the \irrep{40} of
\A4 from \outref{out:SingleDominantWeights}. The set of indices $T$ is
\begin{mathin}
T[Weight[A][0,1,0,0]]
\end{mathin}
\begin{mathout}
$\begin{pmatrix} 1 \\ 3 \\ 4 \end{pmatrix}$
\end{mathout}
or in \com{InputForm}: \com{\{\{1\},\{3\},\{4\}\}}. (The structure as ``list of
lists'' is due to the use of the Mathematica built-in functions \com{Position}
and \com{Extract}.) The $\xi_i$'s and the sizes of their associated orbits
$|o_i|$ are
\begin{mathin}
XisAndMul[A4, T[Weight[A][0,1,0,0]]]
\end{mathin}
\begin{mathout}
$\begin{pmatrix}
\weight{1, 0, 0, 1} & 12 \\
\weight{0, {-}1, 1, 1} & 6 \\
\weight{2, {-}1, 0, 0} & 2 \\
\end{pmatrix}$
\end{mathout}
and the weight multiplicity of the dominant weight in the \irrep{40} of \A4 is:
\begin{mathin}
WeightMultiplicity[Weight[A][0,1,0,0], Irrep[A][0,0,1,1]]
\end{mathin}
\begin{mathout}
2
\end{mathout}
The LieART function \com{DominantWeightSystem[\args{irrep}]} gives a list of the
dominant weights of \args{irrep} along with their multiplicities:
\begin{mathin}
DominantWeightSystem[Irrep[A][0,0,1,1]]
\end{mathin}
\begin{mathout}
$\begin{pmatrix}
\weight{0, 0, 1, 1} & 1 \\
\weight{0, 1, 0, 0} & 2 \\
\end{pmatrix}$
\end{mathout}
Thus the weight system of the \irrep{40} consists of the Weyl group orbits of
\weight{0, 0, 1, 1} and \weight{0, 1, 0, 0}, where every weight of the latter
has a multiplicity of two. The function \com{WeightSystem[\args{irrep}]}
generates these orbits based on \com{DominantWeightSystem} and explicitly
duplicates weights according to the multiplicity. If no further processing is
intended the option \com{SpindleShape->True} turns the output into a spindle
shape display, with weights of the same level in one row. E.g., the weight
system of the \irrep{40} of \A4 in spindle shape is
\begin{mathin}
WeightSystem[Irrep[A][0,0,1,1]]
\end{mathin}
\begin{mathout}
\begin{minipage}{4.4in}
\begin{center}
\weight{0, 0, 1, 1}\linebreak
\weight{0, 0, 2, {-}1}\weight{0, 1, {-}1, 2}\linebreak
\weight{0, 1, 0, 0}\weight{0, 1, 0, 0}\weight{1, {-}1, 0, 2}\linebreak
\weight{{-}1, 0, 0, 2}\weight{0, 1, 1, {-}2}\weight{0, 2, {-}2, 1}\weight{1, {-}1, 1, 0}\weight{1, {-}1, 1, 0}\linebreak
\weight{{-}1, 0, 1, 0}\weight{{-}1, 0, 1, 0}\weight{0, 2, {-}1, {-}1}\weight{1, {-}1, 2, {-}2}\weight{1, 0, {-}1, 1}\weight{1, 0, {-}1, 1}\linebreak
\weight{{-}1, 0, 2, {-}2}\weight{{-}1, 1, {-}1, 1}\weight{{-}1, 1, {-}1, 1}\weight{1, 0, 0, {-}1}\weight{1, 0, 0, {-}1}\weight{2, {-}2, 0, 1}\linebreak
\weight{{-}1, 1, 0, {-}1}\weight{{-}1, 1, 0, {-}1}\weight{0, {-}1, 0, 1}\weight{0, {-}1, 0, 1}\weight{1, 1, {-}2, 0}\weight{2, {-}2, 1, {-}1}\linebreak
\weight{{-}2, 0, 0, 1}\weight{{-}1, 2, {-}2, 0}\weight{0, {-}1, 1, {-}1}\weight{0, {-}1, 1, {-}1}\weight{2, {-}1, {-}1, 0}\linebreak
\weight{{-}2, 0, 1, {-}1}\weight{0, 0, {-}1, 0}\weight{0, 0, {-}1, 0}\linebreak
\weight{{-}2, 1, {-}1, 0}\weight{1, {-}2, 0, 0}\linebreak
\weight{{-}1, {-}1, 0, 0}\linebreak
\end{center}
\end{minipage}
\end{mathout}
\subsubsection{Properties of Irreducible Representations}
\paragraph{Dimension}
The dimension of an irrep, i.e., the number of its weights, can be calculated
without explicitly generating the weight system. The \emph{Weyl dimension
formula}, which is a special case of Weyl's character formula, gives the
dimension of an irrep in terms of its highest weight $\Lambda$, positive roots
$\alpha{\in}\Delta^{\!+}$ and $\delta{=}\dynkincomma{1,1,\ldots,1}$:
\begin{equation}
\dim(\Lambda) = \prod_{\alpha\in\Delta^{\!+}\!}\frac{\scalarproduct{\alpha}{\Lambda + \delta}}{\scalarproduct{\alpha}{\delta}}.
\end{equation}
It is implemented in LieART as \com{Dim[\args{irrep}]}. E.g., the dimension of
the \irrep{40} of \A4 can be obtained by
\begin{mathin}
Dim[Irrep[A][0,0,1,1]]
\end{mathin}
\begin{mathout}
40
\end{mathout}
The Dynkin label of \args{irrep} does not need to be numerical. By using
variables the simple structure of the formula becomes explicit, e.g., for a
general irrep of \A4:
\begin{mathin}
Dim[Irrep[A][a,b,c,d]]
\end{mathin}
\begin{mathout}
$\displaystyle\frac{1}{288}(a+1)(b+1)(c+1)(d+1)(a+b+2)(b+c+2)(c+d+2)(a+b+c+3)(b+c+d+3)(a+b+c+d+4)$
\end{mathout}
Internally, the Weyl dimension formula is computed by
\com{WeylDimensionFormula[\args{algebra}]} as a pure function with the digits of
the Dynkin label as parameters. E.g. for \A4:
\begin{mathin}
WeylDimensionFormula[A4]//InputForm
\end{mathin}
\begin{mathout}
Function[\{a1,a2,a3,a4\},((1{+}a1){*}(1{+}a2){*}(1{+}a3){*}(1{+}a4){*}(2{+}a1{+}a2){*}(2{+}a2{+}a3)\newline
{*}(2{+}a3{+}a4){*}(3{+}a1{+}a2{+}a3){*}(3{+}a2{+}a3{+}a4){*}(4{+}a1{+}a2{+}a3{+}a4))/288]
\end{mathout}
The function \com{Dim[\args{irrep}]} acts only as a wrapper applying the pure
function to the specified Dynkin label in the argument of \com{Dim}.
\paragraph{Index}
Another important property of an irrep $\Lambda$ is its \emph{index}, denoted as
$l(\Lambda)$, which is an eigenvalue of the \emph{Casimir invariant} normalized
to be an integer:
\begin{equation}
l(\Lambda) = \frac{\text{dim}(\Lambda)}{\text{ord}(L)}\scalarproduct{\Lambda}{\Lambda+2\delta},
\end{equation}
where $\text{ord}(L)$ is the order of the Lie algebra, which is equivalent to
the number of roots or the dimension of the adjoint irrep. The index is related
to the length of the weights and has applications in renormalization group
equations and elsewhere. The corresponding LieART function is
\com{Index[\args{irrep}]}. E.g., the index of the \irrep{40} of \A4 is:
\begin{mathin}
Index[Irrep[A][0,0,1,1]]
\end{mathin}
\begin{mathout}
22
\end{mathout}
The label of \args{irrep} does not need to be numerical, similar to \com{Dim}.
\paragraph{Congruency Class}
The \emph{congruency class} expands the concept of $n$-ality of \SU{N}, which in
turn is a generalization of \SU3 triality, to all other simple Lie
Algebras. LieART uses congruency classes to characterize irreps, especially for
the distinction of irreps of the same dimension and with the same index. We
follow the definitions of \cite{lemire_congruence_1980,McKay:99021}, labeling
the congruency class by the \emph{congruency number}, which is a single number
for \A{n}, \B{n}, \C{n}, \E6, \E7, \E8, \F4 and \G2 and a two component vector
for \D{n}. For an irreducible representation \dynkin{a_1a_2\ldots a_n} the
congruency number (vector) $c$ is:
\begin{align}
\A{n} &:& c &= \sum_{k=1}^n k\,a_k\ (\text{mod}\ n+1)\\
\B{n} &:& c &= a_n\ (\text{mod}\ 2)\\
\C{n} &:& c &= a_1 + a_3 + a_5 + \ldots\ (\text{mod}\ 2)\\
\D{n} &:& c &= \begin{pmatrix}
a_{n-1}+a_n\ (\text{mod}\ 2) \\
2a_1+2a_3+\ldots+2a_{n-2}+(n-2)a_{n-1}+n a_n\ (\text{mod}\ 4)
\end{pmatrix}
\ \text{for $n$ odd}\qquad\qquad\\
&& c &= \begin{pmatrix}
a_{n-1}+a_n\ (\text{mod}\ 2)\\
2a_1+2a_3+\ldots+2a_{n-3}+(n-2)a_{n-1}+n a_n\ (\text{mod}\ 4)
\end{pmatrix}
\ \text{for $n$ even}\qquad\qquad\\
\E6 &:& c &= a_1 - a_2 + a_4 - a_5 \ (\text{mod}\ 3)\\
\E7 &:& c &= a_4 + a_6 + a_7\ (\text{mod}\ 2)\\
\E8,\F4,\G2&:& c &= 0
\end{align}
Please note that the congruency class definitions of
\cite{lemire_congruence_1980,McKay:99021}, which we use, differ from
\cite{Slansky} for the \D{n}'s: For \D4, i.e.\ \SO8, the second component of
\cite{Slansky} is half of the definition above. The congruency class for \D5 is
only a single number in \cite{Slansky}, which is the same as the second
component of the \D5 congruency class vector of our definition from
\cite{lemire_congruence_1980,McKay:99021} (with no factor of 2).
These congruency numbers (vectors) $c$ are implemented as
\com{CongruencyClass[\args{irrep}]} in LieART. E.g., the three
eight-dimensional irreps of \SO8 are all distinguished by their
congruency number vectors:
\begin{mathin}
CongruencyClass[\{Irrep[D][1,0,0,0], Irrep[D][0,0,1,0], Irrep[D][0,0,0,1]\}]
\end{mathin}
\begin{mathout}
\{(02),(12),(10)\}
\end{mathout}
The head of a congruency vector is \com{CongruencyVector} and it is displayed as
$(c_1c_2)$, i.e. without commas separating the two components similar to the
Dynkin label of irreps.
\subsubsection{Representation Names}
The Dynkin label of an irreducible representation together with its Lie algebra
uniquely specifies it, e.g., \dynkin{0,0,1,1} of \A4. An irrep in LieART is also
represented (\com{FullForm}) by the Dynkin label and a Mathematica head that
indicates the algebra class. E.g., the irrep \dynkin{0,0,1,1} of \A4 is
represented by \com{Irrep[A][0,0,1,1]} in LieART.
However, it is common practice to name representations by their dimension, the
\emph{dimensional name}, which is often times shorter. The dimension of a
representation is not unique, i.e., there are different irreps with the same
dimensions, which might be accidental or because of a relation between them. If
it is accidental, irreps with the same dimension have primes ($\text{\bf
dim}^\prime$) in their dimensional name, e.g., the \irrep[1]{175} of \A4 is
unrelated to the \irrep{175}. Irreps can be related by conjugation, when they
are complex. One of the irreps is written with an overbar ($\irrepbar{dim}$).
E.g., the \irrepbar{10} of \A4 is the conjugate of the \irrep{10}. Due to the
high symmetry of \SO8 irreps, more than two related irreps of the same dimension
exist. In the case of \SO8 subscripts specify the irreps completely.
The introduced properties of representations, the dimension, the index and the
congruency class serve us well to discriminate between irreps with the same
dimension. LieART has an algorithm implemented that determines the dimensional
name of an irrep, following the naming conventions of \cite{Slansky}:
\begin{enumerate}
\item
To determine the dimensional name of a specific irrep, LieART collects
other irreps of the same dimensionality by brute-force scanning through a
generated set of irreps.
\item
Irreps that are related by conjugation or the symmetry of \SO8 not only
have the same dimension, but also the same index. \emph{Un}related irreps of
equal dimension have different indices and can be organized and labeled by their
indices. They are sorted by ascending index and labeled with primes ($\text{\bf
dim}^\prime$) accordingly, starting with no prime. E.g., the names of the two
unrelated 70-dimensional irreps of \A4 are (the congruency class of \A4 is
called ``Quintality''):
\begin{center}
\begin{tabular}{lllll}
\toprule
\textbf{Dynkin} & \textbf{Dimension} & \textbf{Index} & \textbf{Quintality} & \textbf{Name} \\
\midrule
\dynkin{2, 0, 0, 1} & 70 & 49 & 1 & \irrep{70} \\
\dynkin{0, 0, 0, 4} & 70 & 84 & 1 & \irrep[1]{70} \\
\bottomrule
\end{tabular}
\end{center}
\item
Related irreps of the same dimensionality have the same index, but
mostly (see below) different congruency class number. For Lie algebras other
than \SO8, only the conjugate of complex irreps are related. The convention here
is that the irrep with \emph{higher} congruency class number of the conjugated
pair is labeled with an overbar ($\irrepbar{dim}$). Since e.g. the \irrep[1]{70}
is a complex irrep it has a related conjugated irrep, the \irrepbar[1]{70},
i.e., overbars and primes may both appear in the labeling of an irrep. The above
table for the determination of the primes involves only the irreps of lower
congruency class number among those of the same dimension and index. Consider the
\irrep[1]{70} and its conjugate, the \irrepbar[1]{70}:
\begin{center}
\begin{tabular}{lllll}
\toprule
\textbf{Dynkin} & \textbf{Dimension}& \textbf{Index} & \textbf{Quintality} & \textbf{Name} \\
\midrule
\dynkin{0, 0, 0, 4} & 70 & 84 & 1 & \irrep[1]{70} \\
\dynkin{4, 0, 0, 0} & 70 & 84 & 4 & \irrepbar[1]{70} \\
\bottomrule
\end{tabular}
\end{center}
If the congruency class number of a complex irrep is zero, its conjugate
also has a congruency class number of zero. In this case, where all three, the
dimension, the index and the congruency class number are the same, the structure
of the Dynkin labels are consulted: With the Dynkin label interpreted as digits
of an integer number, the \emph{smaller} ``number'' is labeled with the overbar.
E.g., the 126 dimensional irreps of \A4 are
\begin{center}
\begin{tabular}{lllll}
\toprule
\textbf{Dynkin} & \textbf{Dimension}& \textbf{Index} & \textbf{Quintality} & \textbf{Name} \\
\midrule
\dynkin{2, 0, 1, 0} & 126 & 105 & 0 & \irrep{126} \\
\dynkin{0, 1, 0, 2} & 126 & 105 & 0 & \irrepbar{126} \\
\dynkin{5, 0, 0, 0} & 126 & 210 & 0 & \irrep[1]{126} \\
\dynkin{0, 0, 0, 5} & 126 & 210 & 0 & \irrepbar[1]{126} \\
\bottomrule
\end{tabular}
\end{center}
Observe that this rule only applies for zero congruency class number:
The \irrepbar[1]{70} has a ``larger'' number \dynkin{4, 0, 0, 0} than the
\irrep[1]{70} with \dynkin{0, 0, 0, 4}.
\item
For \SO8 irreps the convention for the labeling with primes are the same
as for all other Lie algebras. Due to the three-fold symmetry most irreps come
in sets of three with the same dimension and index. If only one digit of the
Dynkin label is non-zero it is called the spinor, vector or conjugate irrep,
depending on the dot in the Dynkin diagram it corresponds to. Usually they can
be distinguished by the congruency class number, which is a two component vector
for \SO8: (02) for a vector irrep, (10) for a spinor and (12) for the conjugate.
The irrep is then labeled by the subscripts ``v'', ``s'' and ``c'', resp. E.g.,
the three 8 dimensional irreps of \SO8 are
\begin{center}
\begin{tabular}{lllll}
\toprule
\textbf{Dynkin} & \textbf{Dimension}& \textbf{Index} & \textbf{Congruency vector} & \textbf{Name} \\
\midrule
\dynkin{1, 0, 0, 0} & 8 & 1 & (02) & \irrepsub{8}{v} \\
\dynkin{0, 0, 0, 1} & 8 & 1 & (10) & \irrepsub{8}{s} \\
\dynkin{0, 0, 1, 0} & 8 & 1 & (12) & \irrepsub{8}{c} \\
\bottomrule
\end{tabular}
\end{center}
Some irreps with more than one non-zero digit of the Dynkin label with
the same congruency vectors as above are labeled the same way if they are
unique. However, if there is more than one irrep with the same dimension, index
and also congruency vector, there is more than one digit of the Dynkin label
non-zero. The subscript label is then a mixture like ``sv'', and the ordering is
determined by the Dynkin digit beginning with the largest. E.g. the 224
dimensional irreps of \SO8:
\begin{center}
\begin{tabular}{lllll}
\toprule
\textbf{Dynkin} & \textbf{Dimension}& \textbf{Index} & \textbf{Congruency vector} & \textbf{Name} \\
\midrule
\dynkin{2, 0, 1, 0} & 224 & 100 & (12) & \irrepsub{224}{vc} \\
\dynkin{2, 0, 0, 1} & 224 & 100 & (10) & \irrepsub{224}{vs} \\
\dynkin{1, 0, 2, 0} & 224 & 100 & (02) & \irrepsub{224}{cv} \\
\dynkin{1, 0, 0, 2} & 224 & 100 & (02) & \irrepsub{224}{sv} \\
\dynkin{0, 0, 2, 1} & 224 & 100 & (10) & \irrepsub{224}{cs} \\
\dynkin{0, 0, 1, 2} & 224 & 100 & (12) & \irrepsub{224}{sc} \\
\bottomrule
\end{tabular}
\end{center}
There are also cases where the congruency vector is zero in both
components for all irreps of the same dimension and index. In this case
subtracting the same integer from every Dynkin digit to obtain irreps with
non-zero congruency class vector has proven to be a reliable way to label the
irreps. E.g., the 35 dimensional irreps can be related to the 8 dimensional ones
and, e.g., the primed 840 dimensional irreps to the 56 dimensional ones:
\begin{center}
\begin{tabular}{lllll}
\toprule
\textbf{Dynkin} & \textbf{Dimension}& \textbf{Index} & \textbf{Congruency vector} & \textbf{Name} \\
\midrule
\dynkin{1, 0, 0, 0} & 8 & 1 & (02) & \irrepsub{8}{v} \\
\dynkin{0, 0, 1, 0} & 8 & 1 & (12) & \irrepsub{8}{c} \\
\dynkin{0, 0, 0, 1} & 8 & 1 & (10) & \irrepsub{8}{s} \\
\dynkin{2, 0, 0, 0} & 35 & 10 & (00) & \irrepsub{35}{v} \\
\dynkin{0, 0, 2, 0} & 35 & 10 & (00) & \irrepsub{35}{c} \\
\dynkin{0, 0, 0, 2} & 35 & 10 & (00) & \irrepsub{35}{s} \\ \midrule
\dynkin{1, 0, 1, 0} & 56 & 15 & (10) & \irrepsub{56}{s} \\
\dynkin{1, 0, 0, 1} & 56 & 15 & (12) & \irrepsub{56}{c} \\
\dynkin{0, 0, 1, 1} & 56 & 15 & (02) & \irrepsub{56}{v} \\
\dynkin{2, 0, 2, 0} & 840 & 540 & (00) & \irrepsub[1]{840}{s} \\
\dynkin{2, 0, 0, 2} & 840 & 540 & (00) & \irrepsub[1]{840}{c} \\
\dynkin{0, 0, 2, 2} & 840 & 540 & (00) & \irrepsub[1]{840}{v} \\
\bottomrule
\end{tabular}
\end{center}
\end{enumerate}
In LieART the function \com{DimName[\args{irrep}]} determines the dimensional
name according to the algorithm described above, which is automatically
displayed if an irrep is displayed in \com{TraditionalForm}. Several internal
helper functions are called by \com{DimName}. The function
\com{GetIrrepByDim[\args{algebra},\args{dim},\args{maxDynkinDigit}]} provides
irreps with the same dimension, which are then gathered into sublists by
\com{DimName}.
The function \com{SortSameDimAndIndex} sorts the irreps of same dimension and
index by their congruency class, and automatically by the Dynkin label viewed as
``numbers'', if the congruency class numbers are the same. The positions of the
lists of same-index irreps determine the number of primes to apply, and the
position of the irrep within the same-index list determines whether it should be
labeled by an overbar. In case of an \SO8 irrep \com{DimName} branches to the function
\com{SO8Label[\args{irrep}]}, which uses \com{SimpleSO8Label} to give a
subscript of ``v'', ``s'' and ``c'' in the case where the congruency vector is
unique. If the congruency vector is not unique, but non-zero,
\com{ConcatSO8Label} concatenates the mixed subscripts like ``sv'' in the
correct ordering. If the congruency vector is zero in both components the irrep
is related to irreps with non-zero congruency vector by
\com{ReducedDynkinLabel}.
\paragraph{Limitations}
The determination of the primes has one limitation, which requires explanation:
The function \com{GetIrrepByDim[\args{algebra},\args{dim},\args{maxDynkinDigit}]}
determines irreps of the same dimension. In a brute-force fashion it generates
``all'' irreps and extracts only those that have the dimension \args{dim}. Since
there are infinitely many irreps of any Lie algebra, the search must be constrained. This
is done by imposing a maximum Dynkin digit to use for the generation of Dynkin
labels. Since the numbers of possible Dynkin labels grow rapidly with the
maximum Dynkin digit allowed, the limit should be very low. To compare the irrep
in question with others it should be at least its maximum Dynkin digit, e.g. for
\dynkin{2,0,3,1} it is ``3''. The related irreps only have a permutation of the
Dynkin label, thus they are included in the generated list of irreps up to a
Dynkin digit of ``3'' in the example. However, for the determination of the
primes for the unrelated irreps it may not suffice to generate irreps only up to
the maximum Dynkin digit of the irrep in question: The number of primes is
determined by the position in a list of same-dimensional irreps sorted by the
index. If an irrep with a higher maximal Dynkin digit exists, e.g.,
``4'' in our example, which at the same time has a lower index than the irrep in
question, this procedure would give the irrep in question too few primes. This
situation rarely happens, especially in \A{n}'s, but e.g.\ for \G2 it
happens as early as the 77 dimensional irrep:
\begin{center}
\begin{tabular}{lllll}
\toprule
\textbf{Dynkin} & \textbf{Dimension}& \textbf{Index} & \textbf{Congruency number} & \textbf{Name} \\
\midrule
\dynkin{3, 0} & 77 & 44 & 0 & \irrep{77} \\
\dynkin{0, 2} & 77 & 55 & 0 & \irrep[1]{77} \\
\bottomrule
\end{tabular}
\end{center}
When determining the name of \dynkin{0, 2} of \G2 the Dynkin labels would
only be generated up to a maximum Dynkin digit of ``2'', the \dynkin{3, 0} would
not appear and thus the \dynkin{0, 2} would be labeled without any prime. The
determination of the name of \dynkin{3, 0} would ``see'' the \dynkin{0, 2}, but
would determine no prime for \dynkin{3, 0} because of its lower index compared
to \dynkin{0, 2}. For these two irreps the problem can be solved by generating
irreps up to the maximum Dynkin digit \emph{plus one} for the irrep in question,
i.e., up to ``3'' for \dynkin{0, 2}. Because the Dynkin label of a \G2 irrep is
small, this is easily manageable. In fact, we have implemented the addition of
three to the maximum Dynkin digit for \G2, because for some higher dimensions
the problem will reappear. However, for Lie algebras with long Dynkin labels,
the number of generated Dynkin labels becomes large and its construction slows
LieART down and consumes a large amount of memory. We have found a balance
between accuracy and efficiency, which pushes this problem to very high
dimensional irreps, by defining the following number to add to the maximum
Dynkin digit of the irrep in question: 1 for \A{n},\B{n},\C{n} and \D{n} with
$n\leq 4$, 0 for \A{n},\B{n},\C{n} and \D{n} with $n\geq 5$, 1 for \E6 and \F4,
0 for \E7 and \E8 and 3 for \G2. Please note that this limitation is only
connected to the labeling of irreps with primes. Computations in LieART are
always performed using the Dynkin label as in the \com{FullForm}. If in doubt
one can always use the Dynkin label displayed in \com{StandardForm},
\com{InputForm} and \com{FullForm} which serves as the unique identifier of an
irrep.
Besides its Dynkin label, the alternative definition of \com{Irrep} as \com{Irrep[\args{algebra}][\args{dimname}]} can be used to specify an irrep by its dimensional name as \args{dimname} and its algebra as \args{algebra}.
The \args{algebra} must be fully specified, such as \com{A4}, \com{SU5}, \com{E6}, not only the algebra class such as \com{A}. The \args{dimname} is an integer for the dimension with a \com{Bar[\args{dim}]}
wrapped around it for a conjugated irrep or an \com{IrrepPrime[\args{dim},\args{n}]} for an irrep with \args{n} primes. If only one prime is needed the second argument \args{n} may be omitted.
The \com{Bar} and \com{IrrepPrime} can be combined in any sequence. E.g., the \irrepbar[1]{175} can be entered by
\begin{mathin}
Irrep[A4][IrrepPrime[Bar[175]]]//InputForm
\end{mathin}
\begin{mathout}
Irrep[A][0, 0, 2, 1]
\end{mathout}
Alternatives are \texttt{Irrep[SU5][IrrepPrime[Bar[175]]]}, \texttt{Irrep[SU5][Bar[IrrepPrime[175]]]} and \texttt{Irrep[A4][IrrepPrime[Bar[175],1]]}.
Internally the function \com{GetIrrepByDimName[\args{algebra}, \args{dimname}]} determines the corresponding Dynkin label. It uses the function \com{GetIrrepByDim} mentioned above to find all irreps with the
same dimension and then extract the irrep with the identical dimensional name. If the user specifies an irrep that does not exist, e.g. an \irrepbar{11} of \A4, the comparison must stop at some point.
It has been chosen that \com{GetIrrepByDim} generates only irreps with a maximum Dynkin digit as set by the global variable \com{\$MaxDynkinDigit}. The default is \com{\$MaxDynkinDigit}=3. The consequence is
that the determination of the correct Dynkin label of the entered irrep may abort, because the irrep does not exist or because it involves a Dynkin digit higher than \com{\$MaxDynkinDigit}=3. The latter is
the case for the \irrep[1]{70} with a Dynkin label of \dynkin{0,0,0,4}. LieART prints an error message indicating the two possible scenarios:
\begin{mathin}
Irrep[A4][IrrepPrime[70]]//InputForm
\end{mathin}
\begin{mathout}
\color{darkbrown}Irrep::noirrep: Either an irrep with the dimension name \irrep[1]{70} does not exist in SU(5) or it has at least one Dynkin digit higher than 3. Try with \$MaxDynkinDigit set to a higher value than 3. >>
\end{mathout}
Increasing \com{\$MaxDynkinDigit} to 4 resolves the issue:
\begin{mathin}
\nohangingindent
\$MaxDynkinDigit=4;\newline
Irrep[A4][IrrepPrime[70]]//InputForm
\end{mathin}
\begin{mathout}
Irrep[A][0,0,0,4]
\end{mathout}
\subsection{Tensor Product Decomposition}
\subsubsection{Generic Algorithm}
\label{ssec:TensorProductGeneric}
\definition{
\com{DecomposeProduct[\args{irreps}]} & Decomposes the tensor product of \args{irreps}\\
\com{DominantWeightsAndMul[\args{weights}]} & Filters and tallies dominant weights of \args{weights} by multiplicities\\
\com{SortOutIrrep[\args{dominantWeightsAndMul}]} & Sorts out the irrep of largest height from the collection of dominant weights \args{dominantWeightsAndMul}
}{Tensor product decomposition.}
Tensor products of irreps can be decomposed into a direct sum of irreps. The product of two irreps $R_1$ and $R_2$ can be decomposed as
\begin{equation}\label{eq:TensorProduct}
R_1\otimes R_2 = \sum_i m_i R_i
\end{equation}
with the following dimension and index sum rules:
\begin{align}
\dim(R_1\otimes R_2)&= \dim(R_1)\cdot\dim(R_2)=\sum_i m_i \dim(R_i)\\
l(R_1\otimes R_2)&= l(R_1)\dim(R_2)+\dim(R_1)l(R_2)=\sum_i m_i\, l(R_i).
\end{align}
\vspace{-13pt}
A straight-forward method to compute the right-hand side of \eqref{eq:TensorProduct} is the following: Add all weights of $R_2$ to each weight of $R_1$. The resulting $\dim(R_1)\cdot\dim(R_2)$ weights belong to the
different irreps $R_i$, which must be sorted out. Instead of all weights, one can consider just the dominant weights in the product, as each of the dominant weights represents an orbit in the irreps $R_i$.
As an irrep is a collection of orbits, some of the dominant weights in the product represent the highest weight of an irrep in the decomposition.
There is a unique dominant weight that represents the irrep of largest height in the decomposition. The sorting procedure should start with this dominant weight viewed as the highest weight of
an irrep and then construct the dominant weight system of the corresponding irrep. The dominant weight system of the irrep with largest height should then be subtracted from the combined dominant weights of the product to filter it out.
The same procedure is applied recursively to the remaining set of dominant weights until it is empty, i.e., all irreps have been filtered out. We will demonstrate this procedure with LieART in the following paragraphs.
LieART provides the function \com{DecomposeProduct[\args{irreps}]} for the decomposition of the tensor product of arbitrary many \args{irreps} of any classical or exceptional Lie algebra as argument. As a demonstration of the algorithm implemented we
consider the decomposition of the \SU3 tensor product $\irrep{8}{\otimes}\irrep{8}$, which is
\begin{mathin}
DecomposeProduct[Irrep[SU3][8], Irrep[SU3][8]]
\end{mathin}
\begin{mathout}
$\irrep{1}+2(\irrep{8})+\irrep{10}+\irrepbar{10}+\irrep{27}$
\end{mathout}
To obtain the result LieART adds all weights of the second \irrep{8} to each weight of the first \irrep{8}, using the built-in Mathematica function \com{Outer}.
It filters out only the dominant weights and tallies multiple occurrences thereof using the LieART function \com{DominantWeightsAndMul[\args{weights}]}, which also sorts the weights according to their height, when viewed as
a highest weight of an irrep. For the \SU3 tensor product $\irrep{8}{\otimes}\irrep{8}$ the dominant weights along with their multiplicities are
\begin{mathin}
DominantWeightsAndMul[Flatten[Outer[Plus, WeightSystem[Irrep[SU3][8]], WeightSystem[Irrep[SU3][8]]]]]
\end{mathin}
\begin{mathout}\label{out:8otimes8}
$\begin{pmatrix}
\weight{2, 2} & 1\\
\weight{0, 3} & 2\\
\weight{3, 0} & 2\\
\weight{1, 1} & 6\\
\weight{0, 0} & 10
\end{pmatrix}$
\end{mathout}
The dominant weight with the largest height \weight{2, 2} must be the highest weight of an irrep. The dominant weight system of the \dynkin{2,2} (the \irrep{27}) of \SU3 is
\begin{mathin}
DominantWeightSystem[Irrep[A][2, 2]]
\end{mathin}
\begin{mathout}\label{out:27}
$\begin{pmatrix}
\weight{2, 2} & 1\\
\weight{0, 3} & 1\\
\weight{3, 0} & 1\\
\weight{1, 1} & 2\\
\weight{0, 0} & 3
\end{pmatrix}$
\end{mathout}
It contains all dominant weights appearing in the tensor product, but with mostly smaller multiplicities. The irrep \dynkin{2,2} can be filtered out by subtracting the multiplicities in \outref{out:27} from \outref{out:8otimes8}.
The LieART function \com{SortOutIrrep[\args{dominantWeightsAndMul}]} performs the task of computing the dominant weight system of the irrep corresponding to the largest height weight and subtracting it from the tensor product. It
returns the dominant weights of the tensor product with the ones of the irrep removed and passes the latter using the \com{Sow} and \com{Reap} mechanism of Mathematica:
\begin{mathin}
Reap[SortOutIrrep[\%\%]]
\end{mathin}
\begin{mathout}\label{out:27filteredout}
$\{\begin{pmatrix}
\weight{0, 3} & 1\\
\weight{3, 0} & 1\\
\weight{1, 1} & 4\\
\weight{0, 0} & 7
\end{pmatrix},\,(\irrep{27})\}$
\end{mathout}
The function \com{SortOutIrrep} is applied recursively until the list of dominant weights with multiplicities of the tensor product is empty. E.g., applying \com{SortOutIrrep} to the dominant weights of \outref{out:27filteredout} filters out the \dynkin{0,3} (the \irrepbar{10}) of \SU3:
\begin{mathin}
Reap[SortOutIrrep[First[\%]]]
\end{mathin}
\begin{mathout}
$\{\begin{pmatrix}
\weight{3, 0} & 1\\
\weight{1, 1} & 3\\
\weight{0, 0} & 6
\end{pmatrix},\,(\irrepbar{10})\}$
\end{mathout}
The irreps filtered out by \com{SortOutIrrep} are collected by the LieART function
\com{GetIrreps}, which passes them on to the calling function \com{DecomposeProduct}, where it is only formatted for the output.
\subsubsection{\SU{N} Decomposition via Young Tableaux}
A correspondence of \SU{N} irreps and Young tableaux is very useful for the calculation of tensor products and subalgebra decomposition by hand.
We have found that the algorithm for the \SU{N} tensor product decomposition via Young tableaux also performs better on the computer, with respect to
CPU time and memory consumption, than the procedure described in the previous section. Thus, LieART uses the Young tableaux algorithm for the tensor
product decomposition of \SU{N} irreps and the procedure of adding weights and filtering out irreps for all other classical and exceptional Lie algebras.
A \emph{Young tableau} is a left-aligned set of boxes, with successive rows having an equal or smaller number of boxes.
Young tableaux correspond to the symmetry of the tensors of \SU{N} irreps, by first writing each index of the tensor into one box of a Young tableau and the prescription that
they ought to be first symmetrized in the rows and then antisymmetrized in the columns. Please see \outref{out:YoungTableau720SU5} in section \ref{sec:QuickStart} as a
non-trivial example for a Young tableau displayed by LieART.
To demonstrate the algorithm for the tensor product decomposition via Young tableaux we use the same \SU3 tensor product as in the previous section, $\irrep{8}{\otimes}\irrep{8}$.
The construction principle (by hand) is to put the Young tableau with the most boxes to the left and bump all boxes of the right Young tableau row by row to the left one following certain rules.
To understand these rules label the boxes of each row of the right Young tableau alphabetically:
\begin{equation}
\irrep{8} \otimes \irrep{8} = \young(\hfil\hfil,\hfil)\otimes\young(aa,b)
\end{equation}
Bump the boxes of the first row of the right tableau (labeled with $a$'s) to the ends of the left tableau to form valid Young tableaux and the additional condition that no $a$'s are in the same column,
as this would change symmetric indices to antisymmetric ones. (see crossed out tableau, which is also not a valid \SU3 tableau, because it has four (or $N{+}1$ in general for \SU{N}) boxes in a column) :
\begin{equation}
\young(\hfil\hfil,\hfil)\otimes\young(aa,b) =
\left(\,
\young(\hfil\hfil aa,\hfil) \oplus
\young(\hfil\hfil a,\hfil a) \oplus
\young(\hfil\hfil a,\hfil,a) \oplus
\young(\hfil\hfil,\hfil a,a) \oplus
\cancel{\young(\hfil\hfil,\hfil,a,a)}\;
\right)\otimes\,\young(b)
\end{equation}
As the next step bump the boxes of the next row to all tableaux obtained by the first step obeying the same rules, but keep only tableaux with an \emph{admissible} sequence of letters, when reading the letters
from right to left row by row. A sequence of letters is admissible if at any point at least as many $a$'s have occurred as $b$'s and likewise for $b$'s and $c$'s, etc. The sequences $abcd$, $aaabc$, $abab$ are admissible,
but $abba$, $acb$ are not. In our example only the sequence $baa$ is not admissible, which appears in the four crossed-out tableaux:
\begin{align}
\young(\hfil\hfil,\hfil)\otimes\young(aa,b) =\;
&
\cancel{\young(\hfil\hfil aab,\hfil)} \oplus
\young(\hfil\hfil aa,\hfil b) \oplus
\young(\hfil\hfil aa,\hfil,b) \oplus
\\&
\cancel{\young(\hfil\hfil ab,\hfil a)} \oplus
\young(\hfil\hfil a,\hfil ab) \oplus
\young(\hfil\hfil a,\hfil a,b) \oplus
\\&
\cancel{\young(\hfil\hfil ab,\hfil,a)} \oplus
\young(\hfil\hfil a,\hfil b,a) \oplus
\\ &
\cancel{\young(\hfil\hfil b,\hfil a,a)} \oplus
\young(\hfil\hfil,\hfil a,ab)
\end{align}
Removing the labeling with $a$'s and $b$'s, we obtain the \SU3 tensor product decomposition of $\irrep{8}{\otimes}\irrep{8}$ in terms of Young tableaux:
\newcommand\oplusvar{\:\oplus\:}
\newcommand\otimesvar{\:\otimes\:}
\begin{equation}
\begin{array}{c@{\otimesvar}c@{\:=\:}c@{\oplusvar}c@{\oplusvar}c@{\oplusvar}c@{\oplusvar}c@{\oplusvar}c}
\yng(2,1) & \yng(2,1) & \yng(4,2) & \yng(4,1,1) & \yng(3,3) & \yng(3,2,1) & \yng(3,2,1) & \yng(2,2,2)
\end{array}
\end{equation}
Finally we knock out triples (full columns with three boxes) to find:
\begin{equation}
\begin{array}{c@{\otimesvar}c@{\:=\:}c@{\oplusvar}c@{\oplusvar}c@{\oplusvar}c@{\oplusvar}c@{\oplusvar}c}
\yng(2,1) & \yng(2,1) & \yng(4,2) & \yng(3) & \yng(3,3) & \yng(2,1) & \yng(2,1) & \:\bullet\\[20pt]
\irrep{8} & \irrep{8} & \irrep{27}& \irrep{10} & \irrepbar{10} & \irrep{8} & \irrep{8} & \irrep{1}
\end{array}
\end{equation}
In LieART the Young tableau algorithm is automatically applied to tensor products of
\SU{N} irreps using \com{DecomposeProduct[\args{irreps}]}. After sorting the
irrep with fewer boxes to the right (which we will call \args{irrep2} as opposed
to the first one named \args{irrep1}), the function processes through
\args{irrep2}'s rows to bump boxes to \args{irrep1}. The function
\com{BoxesToBump[\args{irrep2},\,\args{row}]} gives the number of boxes in the
current \args{row} to bump to the tableau of \args{irrep1}. The function
\com{AllowedRows[\args{irrep1},\,\args{nboxes}]} determines the rows of
\args{irrep1} that are allowed to bump boxes to, yielding a valid Young tableau with
an admissible sequence. The latter is checked by the helper function
\com{AllowedCombination}. The function \com{AddToTableau[\args{irrep1},\,\args{rowcombinations}]}
performs the bumping of the boxes of one row of
\args{irrep2} in all allowed combinations (\args{rowcombinations}) to
\args{irrep1}. The result of the bumping is directly expressed in terms of a
changed Dynkin label.
\subsection{Subalgebra Decomposition}
\definition{
\com{DecomposeIrrep[\args{irrep},\,\args{subalgebra}]} & Decomposes \args{irrep} to the specified \args{subalgebra}.\\
\com{DecomposeIrrep[\args{pIrrep},\,\args{subalgebra},\,\args{pos}]} & Decomposes the irrep at position \args{pos} of the product algebra irrep \args{pIrrep}.\\
\com{ProjectionMatrix[\args{origin},\args{target}]} & Defines the projection matrix for the algebra-subalgebra pair specified by \args{origin} and \args{target}\\
\com{Project[\args{projectionMatrix},\args{weights}]} & Applies the \args{projectionMatrix} to the \args{weights}\\
\com{GroupProjectedWeights[\args{projectedWeights},\args{target}]} & Groups the projected weights according to the subalgebra specified in \args{target}\\
\com{NonSemiSimpleSubalgebra[\args{origin},\args{simpleRootToDrop}]} & Computes the projection matrix of a maximal non-semisimple subalgebra by dropping one dot of the Dynkin diagram \args{simpleRootToDrop} and turning it into a \U1 charge\\
\com{SemiSimpleSubalgebra[\args{origin},\args{simpleRootToDrop}]} & Computes the projection matrix of a maximal semisimple subalgebra by dropping one dot from the extended Dynkin diagram.\\
\com{ExtendedWeightScheme[\args{algebra},\args{simpleRootToDrop}]} & Adds the Dynkin label associated with the extended simple root ${-}\gamma$ to each weight of the lowest orbit of \args{algebra} and drops the simple root \args{simpleRootToDrop}\\
\com{SpecialSubalgebra[\args{origin},\args{targetirreps}]} & Computes the projection matrix of a maximal special subalgebra by specifying the branching rule of the generating irrep.
}{Subalgebra decomposition of irreps and product algebra irreps.}
The LieART function \com{DecomposeIrrep[\args{irrep},\,\args{subalgebra}]}
decomposes an irrep of a simple Lie algebra into a maximal subalgebra specified
by \args{subalgebra}, which can be simple, semi-simple or non-semi-simple. To
decompose an irrep of a semi-simple or non-semi-simple algebra, a third argument
\args{pos} allows one to specify which part of \args{pIrrep} should be
decomposed into the \args{subalgebra}.
The implementation of \com{DecomposeIrrep} in LieART uses so-called projection
matrices. These matrices project the weights of an irrep into the specified
subalgebra. The resulting weights are further processed in the same manner as
the tensor products of weights discussed above: Only the dominant weights of the
decomposed weights are kept, because they uniquely define the orbits of the
subalgebra and thus its irreps. In the next step the irreps comprised in the
collection of dominant weights are sorted out using the same LieART functions
as for the tensor product decomposition, discussed in section
\ref{ssec:TensorProductGeneric}.
It is clear that the major task is the determination of the projection matrices.
They are different for each algebra-maximal-subalgebra pair and are not unique. (An extensive collection of projection matrices can be found in
\cite{larouche_branching_2009} for the Lie algebra \A{n} and in \cite{larouche_branching_2011} for the Lie algebras \B{n}, \C{n} and \D{n}.)
Once a projection matrix is known it can be used for the decomposition of all irreps of the algebra-maximal-subalgebra pair. E.g., the projection matrix for the branching
$\SU5\to\SU3{\otimes}\SU2{\otimes}\U1$ is
\begin{mathin}
ProjectionMatrix[SU5,ProductAlgebra[SU3,SU2,U1]]
\end{mathin}
\begin{mathout}\label{out:ProjectionMatrix}
$\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 0 & 1 \\
2 & 4 & 6 & 3
\end{pmatrix}$
\end{mathout}
The determination of the projection matrices is closely connected to the problem of finding maximal subalgebras and we defer the description of its implementation in
LieART to the next section. Taking the projection matrix \outref{out:ProjectionMatrix} as given we demonstrate the algorithm of \com{DecomposeIrrep}
to find the branching rule for the \irrep{10} of \SU5 to $\SU3{\otimes}\SU2{\otimes}\U1$, which is
\begin{mathin}
DecomposeIrrep[Irrep[SU5][10],ProductAlgebra[SU3,SU2,U1]]
\end{mathin}
\begin{mathout}
$(\irrep{1},\irrep{1})(-6)+(\irrepbar{3},\irrep{1})(4)+(\irrep{3},\irrep{2})(-1)$
\end{mathout}
The LieART function \com{Project[\args{projectionMatrix},\args{weights}]} applies the \args{projectionMatrix} to each of the \args{weights} and a subsequent
\com{GroupProjectedWeights[\args{projectedWeights},\args{target}]} groups the Dynkin label of each of the \args{projectedWeights} according to the subalgebra specified by
\args{target}. In the case of our example each weight of the \irrep{10} of \SU5 decomposes to $\SU3{\otimes}\SU2{\otimes}\U1$ as:
\begin{mathin}
IrrepRule @@@ Transpose[\{WeightSystem[Irrep[SU5][10]],\newline
Row/@GroupProjectedWeights[Project[ProjectionMatrix[SU5, ProductAlgebra[SU3,SU2,U1]],WeightSystem[Irrep[SU5][10]]], ProductAlgebra[SU3,SU2,U1]]\}]
\end{mathin}
\begin{mathout}\label{out:Projected10SU5}
\nohangingindent
$\weight{0, 1, 0, 0} \rightarrow \weight{0, 1} \weight{0} \weight{4}$\newline
$\weight{1, {-}1, 1, 0} \rightarrow \weight{1, {-}1}\weight{0} \weight{4}$\newline
$\weight{{-}1, 0, 1, 0} \rightarrow \weight{{-}1, 0}\weight{0} \weight{4}$\newline
$\weight{1, 0, {-}1, 1} \rightarrow \weight{1, 0} \weight{1} \weight{{-}1}$\newline
$\weight{{-}1, 1, {-}1, 1} \rightarrow \weight{{-}1, 1}\weight{1} \weight{{-}1}$\newline
$\weight{1, 0, 0, {-}1} \rightarrow \weight{1, 0} \weight{{-}1}\weight{{-}1}$\newline
$\weight{{-}1, 1, 0, {-}1} \rightarrow \weight{{-}1, 1}\weight{{-}1}\weight{{-}1}$\newline
$\weight{0, {-}1, 0, 1} \rightarrow \weight{0, {-}1}\weight{1} \weight{{-}1}$\newline
$\weight{0, {-}1, 1, {-}1} \rightarrow \weight{0, {-}1}\weight{{-}1}\weight{{-}1}$\newline
$\weight{0, 0, {-}1, 0} \rightarrow \weight{0, 0} \weight{0} \weight{{-}6}$
\end{mathout}
The algorithm of \com{DecomposeIrrep} differs slightly and keeps only the dominant weights after projection and groups only them yielding
\begin{equation}
\left(\begin{array}{l}
\weight{0, 1} \weight{0} \weight{4}\\
\weight{1, 0} \weight{1} \weight{{-}1}\\
\weight{0, 0} \weight{0} \weight{{-}6}
\end{array}\right)
\end{equation}
for our example. A combination of the functions \com{GetAllProductIrrep} and \com{GetProductIrrep} filters out the product irreps, $(\irrepbar{3},\irrep{1})(4)$, $(\irrep{1},\irrep{1})(-6)$ and $(\irrep{3},\irrep{2})(-1)$ in our case, by applying
the function \com{GetIrrep} known from section \ref{ssec:TensorProductGeneric} to the weights.
\subsubsection{Branching Rules and Maximal Subalgebras}
To determine the projection matrices we start with the algorithm to find maximal subalgebras. Subalgebras fall into two classes: \emph{regular} and \emph{special} subalgebras, with the
first one being further categorized into non-semisimple and semisimple subalgebras. In the following we describe the derivation of the three types of maximal subalgebras: regular non-semisimple, regular semisimple and special subalgebras,
originally developed by Dynkin \cite{Dynkin:1957um,Dynkin:1957dm} and demonstrate how it is utilized by LieART to determine the projection matrices.
\paragraph{Non-Semisimple Subalgebras}
A non-semisimple subalgebra is a semisimple subalgebra times a \U1 factor, e.g. $\SU3{\otimes}\SU2{\otimes}\U1$. A subalgebra of this type is obtained by removing a dot from the Dynkin diagram. The resulting two or more disconnected
Dynkin diagrams symbolize the semisimple subalgebra and the removed dot, i.e., simple root, becomes the \U1 generator. E.g., the non-semisimple subalgebra $\SU3{\otimes}\SU2{\otimes}\U1$ can be
obtained from \SU5 by removing the third dot from its Dynkin diagram:
\begin{center}
\includegraphics{SU5ToSU3SU2U1DynkinDiagrams-crop.pdf}
\end{center}
Since the Dynkin label of a weight represents its composition of simple roots (explicitly in the $\alpha$-basis), dropping a simple root (dot) from the Dynkin diagram corresponds to
dropping the associated digit from the Dynkin label. The \U1 charge is the coefficient of the dropped simple root in the weight's linear combination of simple roots, i.e., the associated digit
of the Dynkin label in the $\alpha$-basis, which is often normalized to give integer values. Accordingly, in \outref{out:Projected10SU5} the third Dynkin digit of the weight of the \irrep{10} has been removed after the projection
and by expressing the weight system in the $\alpha$-basis
\begin{mathin}
AlphaBasis[WeightSystem[Irrep[SU5][10]]]//Column
\end{mathin}
\begin{mathout}\nohangingindent
(3/5, 6/5, 4/5, 2/5)\newline
(3/5, 1/5, 4/5, 2/5)\newline
(-2/5, 1/5, 4/5, 2/5)\newline
(3/5, 1/5, -1/5, 2/5)\newline
(-2/5, 1/5, -1/5, 2/5)\newline
(3/5, 1/5, -1/5, -3/5)\newline
(-2/5, 1/5, -1/5, -3/5)\newline
(-2/5, -4/5, -1/5, 2/5)\newline
(-2/5, -4/5, -1/5, -3/5)\newline
(-2/5, -4/5, -6/5, -3/5)
\end{mathout}
we see that the \U1 charges at the end are the third coordinates of the weights in the $\alpha$-basis multiplied by 5 to give integer values.
Writing the weights of the \irrep{10} as \emph{columns} of a matrix $\matrixhat{W}$ and the weights with the third digit expressed in non-normalized $\alpha$-basis coordinates moved to the end as \emph{columns} of a matrix $\matrixhat{W}'$,
the projection matrix $\matrixhat{P}$ can be determined from
\begin{equation}
\matrixhat{P}\matrixhat{W} = \matrixhat{W}'
\end{equation}
with the right-inverse $\matrixhat{W}^{\!+}$ of $\matrixhat{W}$ (see section \ref{ssec:Bases}), since $\matrixhat{W}$ is in general not a square matrix:
\begin{equation}
\matrixhat{P} = \matrixhat{W}'\matrixhat{W}^{\!+}.
\end{equation}
As mentioned above the projection matrix found by this procedure can now be used to decompose any \SU5 irrep into $\SU3{\otimes}\SU2{\otimes}\U1$. The \irrep{10} is actually not the smallest irrep needed for the determination
of the projection matrix. The \irrep{10} as well as all other irreps can be built from tensor products of the \irrep{5}, which we call the \emph{generating irrep} of \SU5.
In the orthogonal algebras only tensor products of the so-called \emph{spinor representations} can construct all other irreps of the algebra. Thus, they must be used for the determination of the projection matrices.
The generating irreps of representative Lie algebras are listed in Table \ref{tab:GeneratingIrreps}.
\begin{table}[h]
\begin{center}
\begin{tabular}{lll}
\toprule
\textbf{Algebra} & \textbf{Irrep} & \textbf{Irrep}\\
& \textbf{(Dynkin)} & \textbf{(Name)} \\
\midrule
\A4 (\SU5) & \dynkin{1,0,0,0} & \irrep{5} \\
\B4 (\SO9) & \dynkin{0,0,0,1} & \irrep{16} \\
\C4 (\Sp8) & \dynkin{1,0,0,0} & \irrep{8} \\
\D4 (\SO8) & \dynkin{0,0,0,1} & \irrepsub{8}{s} \\
\E6 & \dynkin{1,0,0,0,0,0} & \irrep{27} \\
\E7 & \dynkin{0,0,0,0,0,1,0} & \irrep{56} \\
\E8 & \dynkin{0,0,0,0,0,0,1,0} & \irrep{248} \\
\F4 & \dynkin{0,0,0,1} & \irrep{26} \\
\G2 & \dynkin{1,0} & \irrep{7} \\
\bottomrule
\end{tabular}
\caption{\label{tab:GeneratingIrreps} Generating Irreps of representative Lie algebras}
\end{center}
\vspace{-15pt}
\end{table}
In fact LieART excludes the zero-weights from the generating irreps, if any, i.e., only the lowest non-trivial orbit is needed for the determination of the projection matrices.
The calculation of a projection requires the knowledge of the simple root to drop from the Dynkin diagram for a specified algebra-subalgebra pair.
LieART provides an extra package file called \texttt{BranchingRules.m}, listing this information for the implemented branching rules along with special embeddings to be discussed later.
The file will be extended to encompass more branching rules in future versions of LieART, but may also be extended by the user. The definition for the more general branching rule $\SU{N}\to\SU{N{-}k}{\otimes}\SU{k}{\otimes}\U1$,
including the demonstrated case $\SU5\to\SU3{\otimes}\SU2{\otimes}\U1$, reads:
{\ttfamily\hangingindent
ProjectionMatrix[\pattern{origin}:Algebra[A][\pattern{n}\_],\newline
\hspace*{4.3ex}ProductAlgebra[Algebra[A][\pattern{m}\_],Algebra[A][\pattern{k}\_],Algebra[U][1]]] :=\newline
\hspace*{4.3ex}NonSemiSimpleSubalgebra[\pattern{origin},-\pattern{k}-1] /; \pattern{m}==(\pattern{n}-\pattern{k}-1)
}
\begin{figure}[t]
\begin{center}
\includegraphics[scale=0.95]{ExtendedDynkinDiagrams-crop.pdf}
\caption{\label{fig:ExtendedDynkinDiagrams} Extended Dynkin Diagrams of classical and exceptional simple Lie algebras.}
\end{center}
\end{figure}
\paragraph{Semisimple Subalgebras}
To obtain a semisimple subalgebra without a \U1 generator, a root from the
so-called \emph{extended Dynkin diagram} is removed. The extended Dynkin diagram
is constructed by adding the most negative root to the set of simple roots. (The
negative of the highest root $\gamma$ gives the most negative root $-\gamma$ to
form the extended Dynkin diagram.) The resulting set of roots is linearly
dependent, but removing one root restores the linear independence yielding a
valid system of simple roots of a subalgebra, which in general is semisimple. The
highest roots $\gamma$ and the according extended root $-\gamma$ for
representative Lie algebras are listed in Table \ref{tab:MostNegativeRoots}.
\begin{table}[h]
\begin{center}
\begin{tabular}{lll}
\toprule
\textbf{Algebra} & \textbf{Highest Root} & \textbf{Extended Root}\\
& \boldmath$(\gamma)$ & \boldmath$({-}\gamma)$ \\
\midrule
\A4 (\SU5) & \rootomega{1, 0, 0, 1} & \rootomega{{-}1, 0, 0, {-}1} \\
\B4 (\SO9) & \rootomega{0, 1, 0, 0} & \rootomega{0, {-}1, 0, 0} \\
\C4 (\Sp8) & \rootomega{2, 0, 0, 0} & \rootomega{{-}2, 0, 0, 0} \\
\D4 (\SO8) & \rootomega{0, 1, 0, 0} & \rootomega{0, {-}1, 0, 0} \\
\E6 & \rootomega{0, 0, 0, 0, 0, 1} & \rootomega{0, 0, 0, 0, 0, {-}1} \\
\E7 & \rootomega{1, 0, 0, 0, 0, 0, 0} & \rootomega{{-}1, 0, 0, 0, 0, 0, 0} \\
\E8 & \rootomega{0, 0, 0, 0, 0, 0, 1, 0} & \rootomega{0, 0, 0, 0, 0, 0, {-}1, 0} \\
\F4 & \rootomega{1, 0, 0, 0} & \rootomega{{-}1, 0, 0, 0} \\
\G2 & \rootomega{0, 1} & \rootomega{0, {-}1} \\
\bottomrule
\end{tabular}
\caption{\label{tab:MostNegativeRoots} Highest roots $\gamma$ and most negative roots $-\gamma$ of representative Lie algebras.}
\end{center}
\vspace{-15pt}
\end{table}
The non-zero entries in the Dynkin label of ${-}\gamma$ prescribe to which
existing dot in the Dynkin diagram it should connect, since the Dynkin label in
the $\omega$-basis encodes the angle between two simple roots. A ``1''
is an angle of 120\textdegree, symbolized by a single connected
line in the Dynkin diagram. A ``2'' is an angle of 135\textdegree,
expressed by a double line in the Dynkin diagram. The minus sign gives negative
angles or reverses the order of roots. The extended Dynkin diagrams for all
classical and exceptional Lie algebras are shown in
Figure~\ref{fig:ExtendedDynkinDiagrams}. Please note that the double line connecting the
extended root $-\gamma$ for \C{n} is according to the ``${-}2$'' in its Dynkin label.
To demonstrate the determination of the projection matrix using the
generating irrep, we cannot use an irrep of \SU{N}, because dropping a root
from the extended Dynkin diagram of \SU{N} returns \SU{N}. Thus, \SU{N} has no
\emph{regular} \emph{maximal} semisimple subalgebra. (Please note that some
\SU{N}'s have \emph{special} maximal semisimple subalgebras, e.g.
$\SU4\to\SU2{\otimes}\SU2$.) Instead we consider the subalgebra branching of
$\SO7{\to}\SU2{\otimes}\SU2{\otimes}\SU2$
($\B3{\to}\A1{\otimes}\A1{\otimes}\A1$). The maximal subalgebra
$\SU2{\otimes}\SU2{\otimes}\SU2$ is obtained from the extended Dynkin diagram of
\SO7 (\B3) by removing the second dot:
\begin{center}
\includegraphics{SO7ToSU2SU2SU2DynkinDiagrams-crop.pdf}
\end{center}
To derive the projection matrix, we investigate the decomposition of the \SO7 generating irrep (the \irrep{8}) into three $\SU2$.
Extending the Dynkin diagram with ${-}\gamma$ has the effect that each weight $w$ gets extended by one entry with the coefficient of
the weight relative to $-\gamma$, obtained by their scalar product: $\scalarproduct{w}{{-}\gamma}$. The so-called \emph{extended weight scheme}
of the lowest non-trivial orbit of a generating irrep is determined by the LieART function \com{ExtendedWeightScheme[\args{algebra},\args{simpleRootToDrop}]},
which directly removes the Dynkin digits associated to the simple root to drop, specified by \args{simpleRootToDrop}.
For the lowest non-trivial orbit of the generating irrep of \SO7 these two steps are:
\begin{equation}\label{eq:DropExtendedWeightScheme}
\begin{matrix}
\weight{0, 0, 1}\\\weight{0, 1, {-}1}\\\weight{1, {-}1, 1}\\\weight{{-}1, 0, 1}\\\weight{1, 0, {-}1}\\\weight{{-}1, 1, {-}1}\\\weight{0, {-}1, 1}\\\weight{0, 0, {-}1}
\end{matrix}
\xrightarrow{\text{insert ${{-}}\gamma$}}
\begin{matrix}
\weight{0, {-}1, 0, 1}\\\weight{0, {-}1, 1, {-}1}\\\weight{1, 0, {-}1, 1}\\\weight{{-}1, 0, 0, 1}\\\weight{1, 0, 0, {-}1}\\\weight{{-}1, 0, 1, {-}1}\\\weight{0, 1, {-}1, 1}\\\weight{0, 1, 0, {-}1}
\end{matrix}
\xrightarrow{\text{drop 2}}
\begin{matrix}
\weight{0, {-}1, 1}\\\weight{0, {-}1, {-}1}\\\weight{1, 0, 1}\\\weight{{-}1, 0, 1}\\\weight{1, 0, {-}1}\\\weight{{-}1, 0, {-}1}\\\weight{0, 1, 1}\\\weight{0, 1, {-}1}
\end{matrix}
\end{equation}
With the weights of the \SO7 generating irrep as columns of the matrix
$\matrixhat{W}$ and the weights in the $3\,\SU2$ decomposition (right-hand side
of \eqref{eq:DropExtendedWeightScheme}) as columns of $\matrixhat{W}'$ the
projection matrix $\matrixhat{P}$ is computed as described for non-semisimple
regular subalgebras as $\matrixhat{P}{=}\matrixhat{W}'\matrixhat{W}^{\!+}$ with
the right-inverse $\matrixhat{W}^{\!+}\!$ of $\matrixhat{W}$.
The definition for the branching rule $\SO7{\to}\SU2{\otimes}\SU2{\otimes}\SU2$ in the file \texttt{BranchingRules.m} reads:
{\ttfamily\hangingindent
ProjectionMatrix[\pattern{origin}:Algebra[B][3],\newline
\hspace*{4.3ex}ProductAlgebra[Algebra[A][1],Algebra[A][1],Algebra[A][1]]]:=\newline
\hspace*{4.3ex}SemiSimpleSubalgebra[\pattern{origin},2]
}
\paragraph{Special Subalgebras}
Special maximal subalgebras cannot be derived from the root system. The embedding of a special subalgebra does not follow a general pattern and must be derived for every algebra-subalgebra pair individually.
Generating irreps are also used to derive the subalgebra embedding, which may be simple or semisimple and can involve more than one irrep of the subalgebra. LieART is not equipped with an algorithm
to determine the maximal special subalgebras, but provides an interface to declare the embeddings (\texttt{BranchingRules.m}), which can be taken from the literature \cite{Slansky,McKay:99021}.
As an example we consider \SO7 (\B3) again, which has \G2 as special maximal subalgebra. The generating spinor irrep of \SO7, the \irrep{8}, decomposes to the \G2 singlet plus the \irrep{7}.
The weights of the \irrep{8} of \SO7 and the weights of both the \irrep{1} and \irrep{7} of \G2 are brought into lexicographical order to define the projection matrix:
\begin{equation}\label{eq:SO7ToG2}
\begin{array}{l@{\:\to\:}l}
\weight{1, 0, {-}1} & \weight{2, {-}1} \\
\weight{1, {-}1, 1} & \weight{1, 0} \\
\weight{0, 1, {-}1} & \weight{1, {-}1} \\
\weight{0, 0, 1} & \weight{0, 0} \\
\weight{0, 0, {-}1} & \weight{0, 0} \\
\weight{0, {-}1, 1} & \weight{{-}1, 1} \\
\weight{{-}1, 1, {-}1} & \weight{{-}1, 0} \\
\weight{{-}1, 0, 1} & \weight{{-}2, 1} \\
\end{array}
\end{equation}
Arranging the weights on the left-hand side of \eqref{eq:SO7ToG2} as columns of $\matrixhat{W}$ and the weights of the right-hand side as columns of $\matrixhat{W}'$, the projection matrix $\matrixhat{P}$ is
again computed via $\matrixhat{P}{=}\matrixhat{W}'\matrixhat{W}^{\!+}$ with the right-inverse $\matrixhat{W}^{\!+}\!$ of $\matrixhat{W}$.
These procedures are performed by the LieART function
\com{SpecialSubalgebra[\args{origin},\args{targetirreps}]}. The definition for the branching rule $\SO7{\to}\G2$ in the file \texttt{BranchingRules.m} reads:
{\ttfamily\hangingindent
ProjectionMatrix[\pattern{origin}:Algebra[B][3],ProductAlgebra[G2]]:=\newline
\hspace*{4.3ex}SpecialSubalgebra[\pattern{origin},\newline
\hspace*{4.3ex}\{ProductIrrep[Irrep[G2][0,0]],ProductIrrep[Irrep[G2][1,0]]\}]
}
Please note that irreps of the subalgebra must be gathered in a list (\{\ldots\}), even if it is a single irrep. The projection matrix for $\SO7{\to}\G2$ is
\begin{mathin}
ProjectionMatrix[B3,ProductAlgebra[G2]]
\end{mathin}
\begin{mathout}
$\begin{pmatrix}
2 & 1 & 0 \\
{-}1 & {-}1 & 0 \\
\end{pmatrix}$
\end{mathout}
|
{
"timestamp": "2012-06-28T02:06:19",
"yymm": "1206",
"arxiv_id": "1206.6379",
"language": "en",
"url": "https://arxiv.org/abs/1206.6379"
}
|
\section{Introduction}
Recently there has been a resurging interest in understanding the general orbital magneto-electric (ME) response\citep{Essin2009,Essin2010,Malashevich2010,Coh2011}. This is due to the fact that the isotropic magneto-electric effect or the so-called $\theta$-term, $\mathcal{L}_\theta=(\theta e^2/2\pi h)E\cdot B$ with $\theta=\pi$, is suggested to describe the time reversal invariant (TRI) topological band insulator (TBI) in three spatial dimensions\cite{Qi08} (3d). Usually the signature of the 3d TBI is the existence of an odd number of Dirac cones on the boundary surfaces, when the time reversal symmetry (TRS) is preserved.\citep{Hasan2010} When the surface states are gapped out by breaking the TRS locally on the boundaries, a half integer Quantum Hall effect will take place, and give rise to a quantized bulk magneto-electric response.\cite{Qi08} It is later shown that this isotropic response is only a part of the more general anisotropic orbital ME tensor defined in the bulk\cite{Essin2010}:
\begin{equation}
\alpha_{ij}=\alpha_{\rm \theta}\delta_{ij}+\alpha_{{\rm 3d}ij},
\end{equation}
where we define $\alpha_\theta=\frac13\mathrm{Tr}\,\alpha_{ij}$ and $\alpha_{\rm 3d}$ is therefore traceless. $\alpha_{ij}$ describes either the orbital magneto-polarizability (OMP) or the orbital electric susceptibility (OES):
\begin{equation}
\alpha_{ij}={\rm d}P_i/{\rm d}B_j={\rm d}M_j/{\rm d}E_i;
\end{equation}
OMP and OES are equal via a Maxwell relation.
One peculiarity of the ME tensor is that $\alpha_\theta$ is only determined up to integer multiples of $e^2/h$ by the bulk band structure.\cite{Malashevich2010,Essin2010} The specific value of $\alpha_\theta$ depends on the details at the boundary. From the polarization perspective, if we attach an integer quantum Hall (IQH) layer with filling $\nu=\pm 1$ respectively on the top and bottom surface of a cylinder, the orbital magneto-polarizability (OMP), i.e., ${\rm d}P/{\rm d}B$, along the axis of the cylinder will change by $e^2/h$, due to the density locking to the magnetic field of the top and the bottom IQH layer. From the magnetization perspective, if we attach an IQH layer with filling $\nu=1$ on the side surfaces of the cylinder, the orbital electric susceptibility (OES), ${\rm d}M/{\rm d}E$, will also change by $e^2/h$, due to the extra Hall current flowing on the surface in response to the electric field. Either way, the ME effect is only determined up to an integer multiple of $e^2/h$. In the bulk, this ambiguity corresponds well to the fact that $\theta$ as a coefficient in front of $(E\cdot B)$ is an angle only defined up to integer multiples of $2\pi$, because $\int E\cdot B {\rm d}^3 x{\rm d}t$ is quantized. $\alpha$ is odd under TRS, but this ambiguity makes it possible for $\alpha_\theta$ not to vanish with TRS preserved, as the two values $\alpha_\theta=\pm e^2/2h$ differ from each other by $e^2/h$. $\alpha_{\theta}=0$ and $\alpha_{\theta}=e^2/2h$ then describe two different insulating states of matter under TRS.
However, the above properties raise some questions. From the polarization perspective this ambiguity from the bulk is not so surprising, since the zero field polarization is already ambiguous with periodic boundary conditions.\cite{King93,Zak1989} From the magnetization perspective, however, this ambiguity is a bit more puzzling, because one commonly regards magnetization as a bulk property.\cite{Thonhauser05,Ceresoli06} In particular, with periodic boundary conditions there seems to be no reason to expect any ambiguity in the magnetization, whereas the ambiguity in the polarization is easily understood.
Before answering this rather specific question, we note that there is an even more general one: to what extent are thermodynamic quantities such as polarization, magnetization, and ME response determined by the bulk? Unlike the conventional thermodynamic quantities which are entirely independent of the boundary, we have already seen that the boundary sometimes plays a role. How can we tell when a thermodynamic variable will depend on the boundary and when it will not?
In the following, we will discuss case by case from the ground state polarization, orbital magnetization, to the magneto-electric tensor. We will argue through Gedanken experiments that some of them depend on the boundary while others do not. We will verify our argument with numerical simulations. By matching the observations with our previous calculation done with periodic boundary conditions, we can then directly tell from the calculation with periodic boundary conditions how different thermodynamic quantities depend on boundaries.
\section{Ground state polarization}
The ground state polarization is given by the following formula with periodic boundary conditions:\cite{King93}
\begin{equation}
P=-ie\int_{BZ}\frac{{\rm d}^d k}{(2\pi)^d}\sum_{\alpha(k)\in occ}\bra{\alpha(k)}\frac{\partial}{\partial k}\ket{\alpha(k)}.
\end{equation}
In one spatial dimension (1d), the polarization is defined modulo $e$ with periodic boundary conditions: $P=P_0+ne$, with $n$ an integer. This corresponds to the observation that with periodic boundary conditions, we can move every electron to the next unit cell and return to the original state, while the two states should by definition have polarization differed by $e$. With two ends, the polarization will take one specific value, depending on the number of charges we put at the two ends.
However, if there are zero modes at the two ends, the polarization is then ambiguous, as theoretically we can consider superposition of states of different occupancy of the zero modes. The bulk value of the polarization thus depends entirely on the boundary.
In 3d it is a bit more interesting. For simplicity let us assume the system sits on a cubic lattice of size $a$. Now the bulk formula has an ambiguity of $e/a^2$, which also corresponds well to the fact that we can move every electron to the next unit cell and return to the same state. However, with boundary surfaces the situation becomes quite different. Consider a capacitor setup. We are allowed to put any number of charges on each of the opposing surfaces, resulting in a change of the polarization in units of $e/A$ (A is the total surface area). In the thermodynamic limit, we can put any finite density of charges on the surface, and the polarization in the bulk can take any value. Our bulk formula is thus no longer valid. To accommodate the charge on the surface, however, the system needs to either be in a metallic state near the boundary, or to break the lattice translation symmetry in the two in-plane directions. If neither condition is satisfied, then we can only add an integer number of electrons per unit cell, and the bulk formula is recovered, with the remaining ambiguity determined by the surface.
How can the bulk formula become invalid? We note that the ground state polarization can be understood as a Berry's phase when one adiabatically turns on the electric field. Firstly, in order for the Berry's phase to make any sense, the system has to be gapped. This is the reason why a metallic surface can render the bulk formula invalid. Secondly, if we break the lattice translation symmetry in the two directions perpendicular to the electric field, we can no longer integrate over the momentum in those directions but should instead sum over a large number of sub-bands labelled by the remaining momentum along the direction of the electric field. The polarization will have an ambiguity of $e/A$ in this case. This is different from the conventional thermodynamic quantity, which will require a symmetry breaking in the bulk to change its value. The Berry's phase is thus a rather fragile thermodynamic quantity.
\section{Ground state orbital magnetization}
It is not immediately obvious that the orbital magnetization is independent of the boundary. In the bulk the operator $\hat M\propto({\bf r\times v})$ is ill-defined with periodic boundary conditions, and seems to grow as one goes near the boundary. Indeed, when one numerically computes $\langle\hat M\rangle$ summing over the local orbitals, there is a finite contribution from the boundary orbitals, which renders the total orbital magnetization different from the naive bulk value.~\cite{Ceresoli06} Nevertheless, it has been shown\cite{Ceresoli06} that the boundary contribution is in fact independent of the details at the boundary via the use of local Wannier functions, in an insulator with zero Chern number.
However, in a Chern insulator, a local Wannier function can not be found\cite{Thonhauser06,Thouless1984}, because the Bloch functions cannot be periodic and smoothly defined over the Brillouin Zone. To see that even in this case the orbital magnetization is still independent of the boundaries, we can consider the following setup:
Suppose we have an insulator with a non-vanishing Chern number in two dimensions. Let us imagine putting an auxiliary layer of insulator on top, with an opposite Chern number, without any interaction with the original one. The new insulator as a whole is then of total Chern number zero. We can therefore make a local Wannier orbital, by a linear combination of orbitals from the two layers.\cite{Soluyanov2011} The argument then goes through for the insulator as a whole, and the total orbital magnetization should be independent of the boundary. Now since there is no interaction between the two layers, the total magnetization is just the sum of the magnetization of the original insulator and the auxiliary insulator. We now consider a particular boundary condition, where the two insulators couple to independent boundary terms that do not interact with each other as well. Let us only vary the boundary terms that couple to the original insulator. The total magnetization cannot change, and neither can the contribution from the auxiliary insulator. We thus have to conclude that even for a Chern insulator, the orbital magnetization is independent of the boundaries.
From this abstract point of view, the generalization to Chern insulators seems rather trivial. However, the presence of gapless chiral edge states may cause one to worry. Suppose we can gate the material to supply a constant chemical potential: what will happen if we turn up the electric potential on the edge? Will the edge current decrease because fewer edge states are occupied, or will it stay the same as required for the bulk magnetization not to change?
\begin{figure}[htb]
\centering
\subfigure[]{\includegraphics[width=3.5cm]{chern_1.eps}}
\subfigure[]{\includegraphics[width=3.5cm]{chern_2.eps}}
\subfigure{\includegraphics[width=1cm]{legend1.eps}}
\caption{We take our Hamiltonian to be \\$H=\sum_n c_n^\dag(\tau_z-i\tau_x)c_{n+\hat x}+c_n^\dag(\tau_z-i\tau_y)c_{n+\hat y}+mc_n^\dag\tau_zc_n+h.c.$,\\ where $\tau's$ are the Pauli matrices. At half filling with $m=1.5$, the band carries a Chern number $C_1=1$. If we set the chemical potential $\mu=0$, the ground state has no magnetization. We put the Hamiltonian on a $10\times 10$ lattice, and take open boundary conditions in both directions. The current on the vertical links is plotted. We relate the current to the magnetization by $I^b=\epsilon^{ab}\partial_aM$, and take the magnetization at the middle to represent the bulk magnetization. (a) $\mu=0.5$. As expected, some edge states are occupied and give rise to a bulk magnetization. (b) If we set $\mu=0$ but locally apply an electric potential $V=-0.5$ to the first two rows at the boundary, the edge states are again occupied. However, in the region next to those layers, a counterpropagating current takes place. The bulk magnetization remains zero (barring some finite size effect).
}
\label{chern}
\end{figure}
We do a straightforward numerical simulation to resolve this paradox. The result is shown in Fig.~\ref{chern}. We can see that while shifting the overall chemical potential creates circulating currents, altering the electric potential locally at the edge does not change the bulk magnetization. If we look closer, while the current right at the edge is changed, there is a counter-propagating current near the edge, which keeps the total current localized near one edge constant. The counter-propagating current is just the integer quantum Hall response to the electric potential gradient. This bulk quantum Hall current exactly compensates for the current carried by the now-unoccupied edge states, and leaves the bulk magnetization insensitive to the local change of the potential near the edge.
A very similar puzzle arises in the $S_z$ conserved spin Hall insulator. On the edge there are counter-propagating TR-paired edge states. When we apply a uniform Zeeman field $H_z$, there will be a net circulating current from the edge states. We can therefore deduce a bulk orbital magnetization response to the Zeeman field. We call this the orbital-Zeeman susceptibility. However, one can locally break the $S_z$ conservation together with the TR symmetry near the edge, to gap out the edge states. In this case, will there still be a bulk magnetization response to the Zeeman field?
The numerical result is shown in Fig.~\ref{spinhall}. Here we can see that even though the edge states are gapped out by the local perturbations, the total current flowing near the edge remains the same. The local perturbation transfers the current from the states at the Fermi level to the occupied bands. In the end, while local properties can affect the gapless states, the total current near the edge is unaffected.
\begin{figure}[htb]
\centering
\subfigure[]{\includegraphics[width=4cm]{sh_1.eps}}
\subfigure[]{\includegraphics[width=4cm]{sh_2.eps}}
\subfigure[]{\includegraphics[width=3.5cm]{sh_3.eps}}
\subfigure[]{\includegraphics[width=3.5cm]{sh_4.eps}}
\subfigure{\includegraphics[width=0.9cm]{legend2.eps}}
\caption{We now think of the previous model as describing spin-up electrons and pair it with its time reversal. We apply a uniform Zeeman field $\delta H_z=0.2\sum_n c_n^\dag S_zc_n$. (a) We plot the eigenstate energies in ascending order. The edge states live inside the gap. (b) By applying a time-reversal as well as $S_z$ symmetry breaking term near the boundary $\delta H=\sum_{\rm n\in edge}c^\dag_n S_x c_n$, we can gap out the edge states. (c)-(d) We look at the current on the vertical links. While the current distributes slightly differently with or without the symmetry breaking term at the edges, the contributions to the bulk magnetization are identical.}
\label{spinhall}
\end{figure}
We therefore conclude that the orbital magnetization, as well as the orbital-Zeeman susceptibility, is independent of the boundary for an insulator. While the circulating current may be carried by the edge states, the total amount is entirely insensitive to the local boundary conditions. One can understand this from a calculation with periodic boundary conditions: the magnetization is calculated as an energy density in a magnetic field. The total energy, unlike the Berry's phase, is a truly extensive property, so that the boundary contribution is irrelevant in the thermodynamic limit. The energy density in the bulk is thus entirely independent of boundaries sufficiently far away, whether there are gapless states or not.
\section{magneto-electric effect}
After the discussion of the polarization and the magnetization, and seeing that they are thermodynamic quantities with very different behaviors, it is natural to ask the same question about the ME tensor and, in addition, about how the Maxwell relation can be maintained. Before going into details of the boundary dependence, however, let us first show that the anisotropic part $\alpha_{\rm 3d}$ is independent of the boundaries.
In terms of electronic Green's functions and with periodic boundary conditions, we have derived the ME tensor from the OMP perspective, as a Berry's phase in a magnetic field:\cite{Tim2011c}
\begin{eqnarray}\label{ompg}
\alpha_{ij}&=&(\alpha_{\rm wzw}+\alpha_{\rm 3d})_{ij},\nonumber\\
{\alpha_{\rm wzw}}_{ij}&=&-\frac{\pi i}6 \epsilon_{abcd}\mathrm{Tr}^S({\bf g}\partial_a{\bf g}^{-1}{\bf g}\partial_b{\bf g}^{-1}{\bf g}\partial_c{\bf g}^{-1}{\bf g}\partial_d{\bf g}^{-1}{\bf g})\delta_{ij};\nonumber\\
{\alpha_{\rm 3d}}_{ij}&=&-\frac{i}6\epsilon_{abj}\mathrm{Tr}\big(g\partial_ig^{-1}g\partial_ag^{-1}g\partial_bg^{-1}g-h.c.\big).
\end{eqnarray}
The traces include the frequency and momentum integral divided by factors of $(2\pi)$; the symbol $\mathrm{Tr}^S$ denotes the integral and trace in one extra dimension in momentum space, with the original Brillouin zone and a trivial test system as the boundary. While the entire ME tensor is derived as a Berry's phase, $\alpha_{\rm 3d}$ does not depend on the Green's function extended to the extra dimension. Without considering boundaries directly, we can show that $\alpha_{\rm 3d}$ is independent of the boundaries, by showing it extends smoothly to finite frequency and momentum.
At finite frequency and momentum, the ME response is understood as a term in the effective action which is proportional to $E^i(q,\omega)B^j(-q,-\omega)$. Unlike the uniform ME response however, this term can no longer be understood as OMP or OES, due to the fact that unlike uniform electromagnetic fields, the electric and magnetic fields at finite frequency and momentum are related by Faraday's law. The term nevertheless affects properties of the propagating electromagnetic waves. For our purposes, it suffices to show that the effective Lagrangian is continuous from $q=0$ to $q\rightarrow 0$. At any $q\neq 0$, we can calculate the effective Lagrangian by the conventional diagrammatic method. Calculated in the Appendix, the bubble diagram gives
\begin{equation}
S_{ME}
=-\int\frac{{\rm d^4}q}{(2\pi)^4} B^\ell(q) E^k(-q)\alpha_{{\rm 3d}k\ell}+\mathcal{O}(q).
\end{equation}
Comparing with Eq.~(\ref{ompg}), we see that $\alpha_{\rm 3d}$ is continuous, whereas $\alpha_{\rm wzw}$ is entirely absent at finite momentum. One might worry that we have missed $\alpha_{\rm wzw}$ in momentum space due to the fact that it is a total derivative in real space, which Fourier transforms to zero and cannot be seen in momentum space. However, one can evaluate the diagram in real space, and it is still absent. Fundamentally this is due to the fact that the conventional perturbation theory is perturbative in orders of the gauge field, which breaks down with uniform field strength. Nevertheless, combining the two calculations, we can still say that $\alpha_{\rm 3d}$ is a bulk property and is independent of the boundaries. $\alpha_{\rm wzw}$, on the other hand, is similar to the polarization: it does depend on the boundary, but when there is no boundary, it presents itself as a Berry's phase. Note that one benefit of using the Green's function is that the separation of the local terms and boundary terms matches exactly how the expression depends on the extra dimension or not. This is not the case if we use the density matrices, either to calculate the same Berry's phase\cite{Tim2011a}, or to calculate a current response to a pumping procedure\cite{Essin2010}. In both calculations the ME tensor naturally separates into two terms, with the first term independent of the energy gap:
\begin{equation}
\alpha=\alpha_{\rm cs}+\alpha_{\rm G};
\end{equation}
$\alpha_{\rm cs}$ is isotropic, but $\alpha_{\rm G}$ is not traceless. While $\alpha_{\rm G}$ can be uniquely determined by the bulk band structure and is independent of the boundaries, its trace is actually not measurable in the bulk.
Let us now focus on the isotropic part $\alpha_{\rm wzw}$. In terms of polarization in a magnetic field, the ambiguity is no surprise. However, how does the ambiguity of the orbital magnetization in an electric field come about?
One origin of the ambiguity is from the fact that the perturbation of a uniform electric field grows with distance. It therefore naturally depends on the boundary, when there is one. When we consider periodic boundary conditions, however, it becomes less clear.
In order to study the OES with periodic boundary conditions, we first have to properly define the magnetization with periodic boundary conditions. Without the current at the boundary, one sensible definition of the magnetization is from the relation $B=H+M$. That is, in the absence of applied current (which generates $H$), the magnetization simply equals the measured magnetic field. Note that with periodic boundary conditions and a finite volume, the magnetic field is quantized, because the total magnetic flux through the sample is quantized in units of $h/e$. In this case we take the perspective that the magnetic field will take the closest quantized value to the magnetization while the magnetization itself is still continuous.
In our previous work\cite{Tim2011a}, we have shown that in a magnetic field, the $\theta$ term, which characterizes the isotropic part of the OMP, changes the quantization condition of the global electric flux. The ground state of the system thus carries an electric flux of $-(\theta e^2/2\pi h)\Phi_B+ne$, where $n$ is some integer that minimizes the flux. Using $0=D=E+P$, the $\theta$ term thus gives an isotropic orbital magneto-polarization response $\frac{\partial P}{\partial B}=\frac{\theta e^2}{2\pi h}$. However, this result is valid only when $(\Phi_B\theta e/2\pi h)<1$. In the thermodynamic limit this condition is always violated, and instead $\frac{\partial P}{\partial B}=0$.
Similarly, to see whether the same term contributes to the OES of the system, we would like to investigate whether there is a uniform magnetic field, when we constrain the path integral to have a given average electric field in the same direction. However, the electric field and the magnetic field behave in intrinsically different ways, when we formulate our theory assuming the existence of electric charges and the absence of magnetic monopoles: the quantization of the electric flux can change in the presence of the magnetic field, while the quantization of the magnetic flux is fixed at $(h/e)$. When we apply an electric flux, we can always imagine that the system is a coherent state composed of states with integer electric fluxes. The background magnetic field therefore does not have to be different from zero. Therefore, even at finite size, the $\theta$ term does not give rise to the OES. \textit{The Maxwell relation between the isotropic OMP and the OES is thus violated.} They are only equal in the thermodynamic limit, where the $\theta$ term gives no contribution for both quantities. In other words, the isotropic OES is better thought of as a bulk-induced surface response, which vanishes when there are no boundary surfaces.
Now let us consider geometry with boundaries in some detail. From the result of Ref.~\cite{Malashevich2010}, we know that with open boundary conditions in all directions, the OES has an ambiguity only determined by specific surface boundary conditions. We have also seen in the introduction that in a cylinder geometry, the ambiguity of the OES can come from the quantized Hall current on the side surfaces.
\begin{figure}[htb]
\centering
\includegraphics[width=8cm]{dmde.eps}
\caption{Here we plot the calculated OES versus the number of layers in the $z$-direction, with the model described by Eq.~(73) in Ref.~\cite{Qi08} with $\theta=0$, $m=c=1$. We take $\theta=0.5\pi$ at the top and the bottom layer to gap out the edge states. (If we take $\theta=\pm 0.5\pi$ on the two surfaces respectively, the whole system will be a Chern insulator and can no longer be kept at charge neutrality without closing the gap in a magnetic field.) We put on an electric field such that the potential difference between the top and the bottom layer is $0.2$. The boxes show the calculated values. The solid curve is a fit by assuming a fixed width $w$ of the surface charges when there is a magnetic field, such that $\frac{{\rm d}M}{{\rm d}E}\propto(1-\frac wn)$. The fit gives $\frac{{\rm d}M}{{\rm d}E}=0.50 \frac{e^2}{h}$ in the thermodynamic limit and $w=2.54$. The OES changes sign as expected, when we change to $\theta=-0.5\pi$ instead on the boundary.}
\label{OES}
\end{figure}
What if there are no side surfaces? Suppose we take periodic boundary conditions only in two directions to get rid of the side surfaces. Does the OES still have the same ambiguity? One naively would expect the situation to be similar to the case with periodic boundary conditions, due to the absence of the possible circulating Hall currents. However, a more careful argument shows it is not the case. In fact, the system will spontaneously generate a magnetic field, which will then generate surface charge density $\sigma=\pm(\nu+\theta/2\pi)e^2B/h$ via the OMP response, to lower the electric energy. Minimizing the total energy as a function of $B$, we then get $B=M=(\nu+\theta/2\pi)e^2E/h$. While at finite size the total magnetic flux is quantized in units of $h/e$ in this setup, in the thermodynamic limit, the magnetic field will converge to the expected value, in contrast to the situations with periodic boundary conditions where it stays at zero. We have numerically confirmed this result by calculating the magnetization in the electric field, using the momentum space formula for the magnetization, derived in Ref.~\cite{Ceresoli06}, as shown in Fig.~\ref{OES}.
Before summing up, let us consider how gapless surface states can alter the ME response. Evidently, if we attach a fractional quantum Hall state on the side of the cylinder, the OES is going to change by a fraction of $e^2/h$.\cite{Brian2011} In general the fraction is quite arbitrary, so in this case the bulk value of the isotropic OES is not valid. This corresponds to the fact that the fractional quantum Hall state has ground state degeneracy. In general, we will therefore expect any gapless surface state will destroy the bulk description of the isotropic ME response.
To sum up, the anisotropic part of the ME tensor $\alpha_{\rm 3d}$ is independent of the boundaries. The isotropic part $\alpha_{\rm wzw}$ depends partially on the boundary. While $\alpha_{\rm 3d}$ is a truly local quantity, $\alpha_{\rm wzw}$ only lives at $q=0$. Together with the fact that both isotropic OES and OMP responses vanish with periodic boundary conditions in the thermodynamic limit, it is better to think of $\alpha_{\rm wzw}$ as a quantized surface effect induced by the bulk.
\section{conclusion}
We have thus gone through polarization, magnetization, and magneto-electric responses and seen their dependence on the boundary.
A bulk calculation done with periodic boundary conditions contains enough information to predict how the quantity in question can depend on the boundary, however. In particular, using our formalism described in Ref.~\cite{Tim2011c}, any quantity that does not involve an extension of the Green's function to one extra dimension is independent of the boundary. On the other hand, quantities that require an extension to the extra dimension will depend on the boundary. The bulk can determine its value up to some quantized amount, only when (i) there are no gapless surface states, (ii) surfaces break no symmetry that is required to determine the bulk value with periodic boundary conditions, and (iii) the system is kept at charge neutrality. If any of the conditions are violated, the surface contribution will dominate and render the results obtained with periodic boundary conditions invalid.
\section*{Acknowledgement}
We thank X.G. Wen and N. Nagaosa for insightful discussions. This work is supported by NSF grant DMR 1104498.
|
{
"timestamp": "2012-06-28T02:04:54",
"yymm": "1206",
"arxiv_id": "1206.6338",
"language": "en",
"url": "https://arxiv.org/abs/1206.6338"
}
|
% Section heading layout: standard article-class \@startsection redefinition.
% Fixes two extraction defects: the missing \renewcommand prefix (without it,
% \section{...} would typeset a bogus heading instead of redefining the macro;
% compare \renewcommand\subsection below) and the missing closing "}%" of the
% beforeskip argument, which left the braces unbalanced.
\renewcommand\section{\@startsection {section}{1}{\z@}%
{-3.5ex \@plus -1ex \@minus -.2ex}%
{2.3ex \@plus.2ex}%
{\normalfont\large\bfseries}}
% Subsection heading layout: article-class \@startsection parameters
% (indent \z@, negative beforeskip suppresses the indentation of the
% following paragraph), bold upright heading font.
\renewcommand\subsection{\@startsection{subsection}{2}{\z@}%
{-3.25ex\@plus -1ex \@minus -.2ex}%
{1.5ex \@plus .2ex}%
{\normalfont\bfseries}}
% Subsubsection heading layout: same skips, italic heading font.
\renewcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}%
{-3.25ex\@plus -1ex \@minus -.2ex}%
{1.5ex \@plus .2ex}%
{\normalfont\itshape}}
\makeatother
% \pplogo: box holding the preprint number, hung into the first-page header
% (see \ps@firstpage below).
% NOTE(review): the \halign body below appears garbled by automated text
% extraction -- the trailing "\footnotesize{}} \date{" fragment matches the
% garbling pattern seen elsewhere in this file and leaves the alignment
% malformed. Confirm against the original source before compiling.
\def\pplogo{\vbox{\kern-\headheight\kern -29pt
\halign{##&##\hfil\cr&{\ppnumber}\cr\rule{0pt}{2.5ex}&\footnotesize{}} \date{\cr}}}
\makeatletter
% Page style for the title page: empty footers, preprint logo in the header
% of both odd and even pages. Fixes the missing closing brace of the \def
% body -- as written, the following \def\maketitle would have been swallowed
% into this definition.
\def\ps@firstpage{\ps@empty \def\@oddhead{\hss\pplogo}%
\let\@evenhead\@oddhead}
% Custom \maketitle: typesets the title block, applies the "firstpage" page
% style (preprint-logo header), flushes \thanks footnotes, then disables
% itself and clears the title data so it can only run once.
\def\maketitle{\par
\begingroup
% NOTE(review): the next line appears garbled by automated macro expansion --
% it redefines \fnsymbol in terms of itself. The original was presumably
% \def\thefootnote{\fnsymbol{footnote}} (symbolic footnote marks in the title
% block). Confirm against the original source.
\def\fnsymbol{footnote}{\fnsymbol{footnote}}
% Footnote marks as superscripts without trailing space.
\def\@makefnmark{\hbox{$^{\@thefnmark}$\hss}}
\if@twocolumn
\twocolumn[\@maketitle]
\else \newpage
\global\@topnum\z@ \@maketitle \fi\thispagestyle{firstpage}\@thanks
\endgroup
\setcounter{footnote}{0}
% Disable further calls and discard stored title/author data.
\let\maketitle\relax
\let\@maketitle\relax
\gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax}
\makeatother
% Equations numbered per section (amsmath).
\numberwithin{equation}{section}
% ---- Shorthand macros ----
% NOTE(review): most \newcommand/\renewcommand lines below are syntactically
% invalid as written -- the first argument is a full expansion (e.g.
% \newcommand{\frac{1}{2}}{\frac{1}{2}}) rather than a single control
% sequence. This is characteristic of an automated macro-expansion pass
% applied to the extracted source: the original file presumably defined short
% aliases (e.g. \half, \eps, \be/\ee) whose names were replaced everywhere by
% their bodies. Restore the original alias names before attempting to compile.
\newcommand{\nonumber}{\nonumber}
\newcommand{M_{pl}}{M_{pl}}
\newcommand{\frac{1}{2}}{\frac{1}{2}}
\newcommand{\frac{1}{4}}{\frac{1}{4}}
\newcommand{\h}[1]{\hat{#1}}
\renewcommand{\v}[1]{\vec{#1}}
\renewcommand{\dag}{\dagger}
\newcommand{\dd}[2]{\frac{\partial #1}{\partial #2}}
\newcommand{\VEV}[1]{\ensuremath{\langle #1 \rangle}}
\newcommand{\mathcal{L}}{\mathcal{L}}
\renewcommand{\H}{\mathcal{H}}
\newcommand{\mathcal{O}}{\mathcal{O}}
\newcommand{\mathcal{N}}{\mathcal{N}}
\newcommand{\mathcal{M}}{\mathcal{M}}
\newcommand{\epsilon}{\epsilon}
\renewcommand{\d}{\delta}
\newcommand{\partial}{\partial}
\newcommand{\Theta}{\Theta}
\renewcommand{\th}{\theta}
\newcommand{\omega}{\omega}
\renewcommand{\d}{\partial}
\newcommand{\begin{equation}}{\begin{equation}}
\newcommand{\begin{eqnarray}}{\begin{eqnarray}}
\newcommand{\end{equation}}{\end{equation}}
\newcommand{\end{eqnarray}}{\end{eqnarray}}
\newcommand{\ee}{\end{equation}}
\newcommand{\frac}{\frac}
\newcommand{\mathcal}{\mathcal}
\newcommand{{\rm Tr}}{{\rm Tr}}
\newcommand{{\rm tr}}{{\rm tr}}
\renewcommand{\t}{\tilde}
\newcommand{\mu_\phi}{\mu_\phi}
\newcommand{\tilde{N}}{\tilde{N}}
\newcommand{\tilde{\Lambda}}{\tilde{\Lambda}}
\newcommand{\tilde{Z}}{\tilde{Z}}
\newcommand{m_{H_d}^2}{m_{H_d}^2}
\newcommand{m_{H_u}^2}{m_{H_u}^2}
\newcommand{{\rm Vol}_{{\rm closed}}}{{\rm Vol}_{{\rm closed}}}
\newcommand{{\rm Vol}_{{\rm open}}}{{\rm Vol}_{{\rm open}}}
\newcommand{y_u}{y_u}
\newcommand{y_d}{y_d}
\newcommand{y_l}{y_l}
\newcommand{y_u^\dagger}{y_u^\dagger}
\newcommand{y_d^\dagger}{y_d^\dagger}
\newcommand{y_l^\dagger}{y_l^\dagger}
\newcommand{g_{H_2}}{g_{H_2}}
\newcommand{g_{H^*_2}}{g_{H^*_2}}
\newcommand{g_{H_1}}{g_{H_1}}
\newcommand{g_{H^*_1}}{g_{H^*_1}}
\newcommand{\pd}[2]{\frac{\partial #1}{\partial #2}}
% ---- Page layout dimensions ----
\textwidth = 6.5 in
\textheight = 8.5 in
\oddsidemargin = 0.0 in
\evensidemargin = 0.0 in
\parskip = 9pt
% Allow text-only float pages; loosen table row spacing.
\renewcommand{\textfraction}{0.0}
\renewcommand{\arraystretch}{1.5}
\begin{document}
% Start numbering at 0 so the title page is page 0.
\setcounter{page}0
% Preprint number shown by \pplogo in the first-page header (empty here).
\def\ppnumber{\vbox{\baselineskip14pt
}}
% NOTE(review): the next line is garbled by the same automated macro
% expansion seen in the preamble -- it malforms \footnotesize and \date.
% The original presumably defined a preprint-date macro; confirm against
% the original source before compiling.
\def\footnotesize{}} \date{{\footnotesize{}} \date{}
\author{Carlos Tamarit\\
[7mm]
{\normalsize \it Perimeter Institute for Theoretical Physics}\\
{\normalsize \it Waterloo, ON, N2L 2Y5, Canada}\\
[3mm]
{\tt \footnotesize ctamarit at perimeterinstitute.ca}
}
\title{\bf Large, negative threshold contributions to light soft masses in models with Effective Supersymmetry
\vskip 0.5cm}
\maketitle
\begin{abstract} \normalsize
\noindent Threshold contributions to light scalar soft masses due to heavy sparticles (possibly including a heavy Higgs mostly aligned with $H_d$) in Effective SUSY scenarios are dominated by two-loop diagrams involving gauge couplings. This is due to the fact that in the limit in which the heavy states are degenerate, their one-loop contributions to the light soft masses only depend on small Yukawas and the hypercharge coupling. The two-loop threshold corrections involving only gauge couplings are calculated accounting for nonzero gaugino and light squark masses and shown to be negative, and rather large ($\delta m^2_{t,L}\sim-480^2\,{\rm GeV}^2$ for heavy sparticles with masses around 10 TeV). The effect on tachyon bounds is revisited with calculations implementing decoupling. It is pointed out that models yielding Effective SUSY spectra using gaugino mediation require in general very heavy gluinos or a very low SUSY breaking scale in order to avoid tachyons (e.g. for heavy squarks at 10 TeV and a SUSY
breaking
scale of
125 TeV, minimal scenarios require $\tilde m_3\gtrsim 2$ TeV at 500 GeV, while nonminimal ones demand $\tilde m_3\gtrsim 8$ TeV).
\end{abstract}
\bigskip
\newpage
\newpage
\section{Introduction}
Effective Supersymmetry (SUSY) scenarios \cite{Dimopoulos:1995mi,Cohen:1996vb,Brust:2011tb}, in which the first and second generation scalars of the Minimal Supersymmetric Standard Model (MSSM) are heavy --as well as possibly some of the third generation scalars, always excluding the left-handed quark doublet and the right-handed stop-- remain well-motivated realizations of Supersymmetry which are natural, solve the flavor problem and are poorly constrained by the ongoing searches at the LHC due to the difficulty in separating light stop signals from top quark backgrounds \cite{Plehn:2012pr,Han:2012fw}.
In the absence of strong experimental constraints, some theoretical ones have been known for a while. It was pointed out in ref.~\cite{ArkaniHamed:1997ab} that, in the case of high-scale SUSY breaking, the Renormalization Group (RG) effects of the heavy scalars can drive the soft masses of the light third generation scalars towards tachyonic values, opening the possibility of phenomenologically disfavored charged or colored vacua. In ref.~\cite{Tamarit:2012ie} it was pointed out that the large hierarchies in the spectrum of sparticles called for an analysis that explicitly implemented the decoupling of heavy particles, which was shown to relax the tachyon bounds coming from the study of the RG evolution \cite{Tamarit:2012ry}.
In keeping with the idea of performing accurate calculations in Effective SUSY scenarios, it is necessary to examine the effect of finite threshold contributions due to the heavy sparticles at the scale at which they are integrated out. The threshold contributions to the soft masses of the light scalars will involve, on dimensional grounds, the masses of the heavy particles in the loops, and thus are expected to be significant. One-loop threshold effects in the MSSM are well known \cite{Pierce:1996zz}. It turns out that in the limit of degenerate heavy scalars --and, if a heavy Higgs state is present, in scenarios in which it is mostly aligned with $H_d$-- these one-loop contributions only depend on small Yukawa couplings and the hypercharge gauge coupling, and may be negative. Results for two-loop threshold corrections due to heavy fields have been obtained in refs. \cite{Agashe:1998zz} and \cite{Hisano:2000wy}, following the results in ref. \cite{Martin:1996zb}, but neglecting the masses of the light
sparticles.
Since two-loop diagrams involve the strong gauge coupling, the previous observations suggest that they can be the dominant contributions to the threshold corrections of the light soft masses, or could be relevant to compensate for the one-loop tachyonic contributions. As previous computations ignored the soft masses of gluinos and the light squarks, this paper presents the corresponding results when they are taken into account. This is the proper thing to do when performing the computations by integrating out heavy particles at their thresholds: first, the hidden sector fields that break SUSY are integrated out, yielding the MSSM with nonzero soft masses, and next the heavy MSSM scalars are also integrated out at their corresponding scales. The calculations for the threshold contributions of the heavy scalars are performed in the $\overline{\rm MS}$ scheme, and they can be directly matched with the results for the low energy, nonsupersymmetric theories obtained after
decoupling the heavy states in refs.~\cite{Tamarit:2012ie,Tamarit:2012ry}. The computation is similar in spirit to that leading to the scalar soft masses in gauge mediation; differences stem from the absence of loops of massive fermions, the absence of mixing of the heavy scalars, the presence of new hypercharge dependent contributions, and the fact that nonzero masses for the gluinos and the light scalars are considered in the propagators.
The result is that these two-loop corrections evaluated at the threshold of the heavy sparticles are negative, and rather large; also, nonzero gluino masses have a sizable impact and tend to enhance the threshold effects, while the dependence on the masses of the light scalars is weaker. This of course contradicts the analogy with minimal gauge mediation, which may have suggested that the first and second generation fields could act as messenger fields that transmit SUSY breaking to the third generation; rather, the heavy fields tend to destabilize the light scalars.
These negative threshold effects call for a reappraisal of the lower bounds for the high scale boundary values of the light scalar masses obtained by demanding the absence of tachyonic squarks and sleptons. Also, in models in which the third generation squark masses arise as a result of gaugino mediation \cite{Craig:2011yk,Craig:2012hc,Cohen:2012rm}, one may obtain in a similar way lower bounds for gaugino masses, since these will have to be large enough to compensate for the tachyonic RG and finite corrections.
The paper is organized as follows. One-loop gauge-coupling dependent contributions are reviewed in section~\ref{sec:oneloop}. Section~\ref{sec:twoloop} centers on the two-loop contributions. In view of the results, tachyon bounds on high scale light scalar masses are revisited in section~\ref{sec:bounds} using the two-loop RG equations of ref.~\cite{Tamarit:2012ie} supplemented with the threshold corrections obtained in this paper; similarly, bounds on gaugino masses are obtained in models involving gaugino mediation for the third generation. Section~\ref{sec:models} summarizes the results.
\section{\label{sec:oneloop}One loop contributions}
Neglecting off-diagonal Yukawas and a-terms that mix light and heavy scalars in Effective SUSY scenarios, and assuming degenerate heavy states with mass $M$,
the one-loop threshold correction at a scale $\mu$ in the $\overline{\rm MS}$ scheme in the Feynman gauge for a light soft mass $m^2_i$ due to the heavy squarks and sleptons is
\begin{equation}
\label{eq:oneloopthresh}
\delta (m^2_i)^{1\,\rm loop}_{\tilde q,\tilde l}(\mu)=-\frac{g_1^2}{16\pi^2}Y_i\sum_j({d}_j Y_j)M^2\left(1-\log\frac{M^2}{\mu^2}\right).
\end{equation}
This contribution, which comes from diagrams with quartic vertices coming from D-terms, is included in the general formulae of ref.~\cite{Pierce:1996zz}, which are written for the nondegenerate case and include nonzero mixing angles. In the expression above, $Y$ designates hypercharges. The sum in $j$ is over all the U(1) representations of the heavy scalar fields, whose dimension is denoted by ${d}_j$. In minimal Effective SUSY scenarios, the heavy scalars include those of the first two generations plus the sleptons and right-handed sbottom of the third generation, yielding $\sum_j({d}_j Y_j)=1$, while in nonminimal scenarios all fields in the third generation are light, which gives $\sum_j({d}_j Y_j)=0$. The absence of contributions dependent on the gauge couplings $g_2,\,g_3$ is due to the degeneracy of the heavy fields and the identities ${\rm Tr}\,T^a=0$ for SU(2) and SU(3) groups.
In Effective SUSY models in which the combination of Higgs doublets
\begin{align}
{\cal H}_{\rm heavy}=\sin\alpha H_u-\cos\alpha H^\dagger_d
\label{eq:heavyH}
\end{align}
is also made heavy, and assuming that it also has a mass $M$, the formula above is still valid if the following substitution is used
\begin{align}\nonumber
&\sum_j({d}_j Y_j)=2\sin^2\alpha,\quad {\text{ minimal Effective SUSY scenarios with a single light Higgs}},\\
&\sum_j({d}_j Y_j)=-\cos2\alpha,\quad {\text{ nonminimal Effective SUSY scenarios with a single light Higgs}}\label{eq:sumY}.
\end{align}
The heavy Higgs has additional contributions proportional to diagonal Yukawas and a-terms. The former are again quadratic in the heavy mass $M$, while the latter are proportional to the trilinear couplings squared, and may be neglected assuming $a_i\ll M$. In this way one obtains the following threshold contributions due to the heavy Higgs field (ignoring again off-diagonal Yukawas):
\begin{align}
\label{eq:oneloopthreshH}\delta (m^2_Q)^{1\,\rm loop}_{\cal H}(\mu)&=-\frac{1}{16\pi^2}(y_t^2\sin^2\alpha+y_b^2\cos^2\alpha)M^2\left(1-\log\frac{M^2}{\mu^2}\right),\\
\nonumber\delta (m^2_U)^{1\,\rm loop}_{\cal H}(\mu)&=-\frac{1}{8\pi^2}y_t^2\sin^2\alpha M^2\left(1-\log\frac{M^2}{\mu^2}\right),\\
\nonumber\frac{1}{y_b^2}\,\delta (m^2_D)^{1\,\rm loop}_{\cal H}(\mu)&=\frac{2}{y_\tau^2}\,\delta (m^2_L)^{1\,\rm loop}_{\cal H}(\mu)=\frac{1}{y_\tau^2}\,\delta (m^2_E)^{1\,\rm loop}_{\cal H}(\mu)=-\frac{1}{8\pi^2}\cos^2\alpha M^2\left(1-\log\frac{M^2}{\mu^2}\right).
\end{align}
The Yukawa couplings in the formulae above are those in the MSSM. Clearly, the threshold contributions of eqs.~\eqref{eq:oneloopthresh} and \eqref{eq:oneloopthreshH} may be sizable and negative when evaluated at the scale $\mu=M$ at which the heavy particles are integrated out. The Higgs contributions can be made small by choosing small values of $\alpha$, since then $\cal H$ is in the direction of $H_d$ and couples through the small Yukawas $y_b,y_\tau$. (However, in a consistent Higgs decoupling limit one has $\alpha\sim \frac{\pi}{2}-\beta$, where $\tan\beta=\frac{v_u}{v_d}$ is the ratio of Higgs VEVs \cite{Tamarit:2012ry}. Therefore small $\alpha$ implies large $\tan\beta$, which enhances the down Yukawas, so that $\alpha$ should not be too small). As an example, for $\cot\alpha\sim\tan\beta=10$ in nonminimal scenarios with a single Higgs, $M=\mu=10 {\rm TeV}$, one has the following threshold contributions, obtained by using the RG equations of ref.~\cite{Tamarit:2012ie} and matching the couplings with
the experimental data as in ref.~\cite{Tamarit:2012ry}:
\begin{align*}
\nonumber&\delta m^2_{\tilde q_L}\sim-93^2 {\rm GeV}^2,\quad \delta m^2_{\tilde t_R}\sim -254^2 {\rm GeV}^2, \quad \delta m^2_{\tilde b_R}\sim -96^2 {\rm GeV}^2,\quad \delta m^2_{\tilde L}\sim -222^2 {\rm GeV}^2,\\
&\delta m^2_{\tilde e_R}\sim 265^2 {\rm GeV}^2,\quad \delta m^2_{H_u}\sim -222^2 {\rm GeV}^2.
\end{align*}
\section{\label{sec:twoloop}Gauge-coupling dependent two loop diagrams involving heavy scalars}
The results of the previous section show that one-loop finite corrections to the light soft masses due to degenerate heavy scalars may be negative when evaluated at the corresponding thresholds. Since for small $\alpha$ they only involve small Yukawas and the hypercharge coupling, this brings up the question of whether two-loop diagrams, which will also feature the strong gauge coupling, may or may not partially cancel them. In the small $\alpha$ limit in which heavy states couple through small Yukawas --or when there is no heavy Higgs field-- two-loop diagrams involving these couplings will be suppressed (more so than at one-loop level, since higher powers of the Yukawas will be present in general). Also, assuming $a_i\ll M$, diagrams with trilinear scalar couplings will be subdominant. Hence, in these scenarios the two-loop diagrams depending on gauge couplings are expected to be dominant. The diagrams that have nonzero, gauge-coupling dependent contributions to the soft masses of the light sparticles at two
loops and don't involve traces over hypercharge are shown schematically in figure~\ref{fig:2loopd}. They are similar to the diagrams with internal scalar lines that yield soft masses for the MSSM scalars in minimal gauge mediation; in this case the propagators corresponding to messenger scalars are substituted by lines of heavy squarks, sleptons or Higgs fields. In contrast with the case of gauge mediation, the diagrams featuring traces over hypercharges do not necessarily add up to zero and therefore have to be included; they are shown in figure~\ref{fig:2loopd2}.
\begin{figure}[t]\centering
\includegraphics{twoloopscalar.eps}
\caption{\label{fig:2loopd} Two-loop diagrams involving heavy scalars contributing to the soft masses of the light scalars and not involving traces over hypercharges.}
\end{figure}
\begin{figure}[t]\centering
\includegraphics{twoloopscalar-2.eps}
\caption{\label{fig:2loopd2} Two-loop diagrams involving heavy scalars contributing to the soft masses of the light scalars and involving traces over hypercharges. The black dots represent one-loop counterterms.}
\end{figure}
Assigning a nonzero mass $m_i$ to the $i$th light scalar and a mass $\tilde m_k$ to the gaugino of the $k$th group, the result of the diagrams in figure \ref{fig:2loopd} when all mixing angles between scalars are zero is, after proper subtraction in the $\overline{\rm MS}$ scheme in the Feynman gauge,
\begin{align}\nonumber
&(\delta m^2_i)^{2\,\rm loop}(\mu)=-\frac{1}{3072\pi^4}\sum_{k,j}g^4_k C^{(k)}_iS^{(k)}_j\left\{M^2\left(16\pi^2-48-96\log\frac{M^2}{\mu^2}+24\phi\left[\frac{m^2_i}{4M^2}\right]\right)\right.\\
\nonumber &-m^2_i\left(42+\pi^2\!-36\log\frac{M^2}{\mu^2}\!+12\log^2\frac{M^2}{\mu^2}\!-36\log\frac{m^2_i}{M^2}+24\log\frac{M^2}{\mu^2}\log\frac{m^2_i}{M^2}\right.\\
\nonumber&\left.\left.+6\log^2\frac{m^2_i}{M^2}+6\phi\left[\frac{m^2_i}{4M^2}\right]\right)-\frac{12}{{\tilde m}^2_k}\Big({\tilde m}^2_k \left(-4 M^2+{\tilde m}^2_k \left(-18+\pi ^2\right)\right)+\left(4 M^4+8 M^2 {\tilde m}^2_k\right.\right.\\
\nonumber&\left.-6 {\tilde m}^4_k\right) \log^2\frac{M^2}{\mu^2}+4 \left({\tilde m}^2_k (M^2+5 {\tilde m}^2_k)+(M^2-{\tilde m}^2_k) (M^2+3 {\tilde m}^2_k) \log\frac{M^2-{\tilde m}^2_k}{\mu^2}\right) \log\frac{{\tilde m}^2_k}{\mu^2}\\
\nonumber&-6 {\tilde m}^4_k \log^2\frac{{\tilde m}^2_k}{\mu^2}-4 \log\frac{M^2}{\mu^2} \Big(M^2 {\tilde m}^2_k+(M^2-{\tilde m}^2_k) (M^2+3 {\tilde m}^2_k) \log\frac{M^2-{\tilde m}^2_k}{\mu^2}+M^2 (M^2\\
&\left.\left.+2 {\tilde m}^2_k) \log\frac{{\tilde m}^2_k}{\mu^2}\Big)+4 (M^2-{\tilde m}^2_k) (M^2+3 {\tilde m}^2_k) \text{Li}_2\left[\frac{{\tilde m}^2_k}{M^2}\right]\right)\right\}.
\label{eq:2loopmassive}
\end{align}
In the previous formula, $\text{Li}_2$ is the dilogarithm function and $\phi$ is defined in eq.~\eqref{eq:phidef}. $C_i^{(k)}=\sum_a T_i^{k,a}T_i^{k,a}$ represents the Casimir of the gauge group $k$ in the representation $i$. For each value of $k$, the sum in $j$ runs over the irreducible representations (irreps) of the heavy scalars with respect to the $k$th gauge group, and $S^{(k)}_j={\rm Tr} \,T_j^{k,a}T_j^{k,a}$ (no sum over repeated indices) is the Dynkin index of the irrep labeled by $j$. $M$ designates again the mass of the heavy scalars; the result when these are nondegenerate can be simply obtained by substituting $M$ with $M_j$, allowing for different masses for the different representations of the heavy fields. In order to compute these mass corrections, the external momenta were set to zero from the beginning; the integrals were obtained in dimensional regularization using the formulae and techniques of refs.~\cite{Smirnov:2006ry} and \cite{Davydychev:1992mt}. More details are given in appendix \ref{app:integrals}. If the light scalars and the gauginos in the loops are massless ($m_i={\tilde m}_k=0$), the result is
\begin{align}
(\delta {m^2_i})^{2\,\rm loop,\,m_i=0}(\mu)=-\frac{M^2}{192\pi^4}\sum_{k,j}g^4_k C^{(k)}_iS^{(k)}_j\left(\pi^2-3-6\log\frac{M^2}{\mu^2}\right).
\label{eq:2loopmassless}
\end{align}
The last two terms inside the brackets differ from the corresponding result of ref.~\cite{Hisano:2000wy}, which was obtained from the formulae for soft masses in models of gauge mediation with generalized messenger sectors by taking the limit in which the fermions in the loop become massless --in this paper, the diagrams that do not involve internal lines of heavy scalars were altogether ignored. The difference can be traced back to a different regularization of the infrared divergences: the authors of ref.~\cite{Hisano:2000wy} use an explicit infrared mass $m^2_\epsilon$ in the integrals denoted as $I[m_1,m_2,1,1,2]$ in appendix \ref{app:integrals} of this paper --see eq.~\eqref{eq:masterI}-- while the calculations presented here simply use dimensional regularization without additional regulators\footnote{While the use of $m^2_\epsilon$ is useful to separate UV and IR divergences and check the cancellation of the latter in physical observables, the use of dimensional regularization alone is equally valid
for computing the
same observables; however, due to the
different regulators, the finite parts can differ, as happens in this case.}.
The contributions of the diagrams in figure~\ref{fig:2loopd2} involving heavy scalars are as follows:
\begin{align}
\nonumber&(\delta m^2_i)^{2\,\rm loop}(\mu)_Y=\sum_{j,k}g^2_1 g^2_k C^{(k)}_j {d_j} Y_jY_i\frac{M^2}{384\pi^4}\left(\!-9\!+\!\pi^2\!+\!6\log\frac{M^2}{\mu^2}\right)\!+\!\sum_{j,\hat i}g_1^4{d}_j{d}_{\hat i} Y^2_{j} Y_{\hat i} Y_i\frac{m^2_{\hat i}}{1536\pi^4}\times\\
\nonumber&\left(6+\pi^2+3\log^2\frac{m_{\hat i}^2}{\mu^2}+6\log\frac{m_{\hat i}^2}{\mu^2}\left(-1+\log\frac{M^2}{\mu^2}\right)+3\left(-2+\log\frac{M^2}{\mu^2}\right)\log\frac{M^2}{\mu^2}\right)\\
\nonumber&+\sum_{j,\hat j}g_1^4{d}_j{d}_{\hat j} Y^2_{\hat j} Y_j Y_i\frac{M^2}{256\pi^4}\log\frac{m_{\hat j}^2}{\mu^2}\left(-1+\log\frac{M^2}{\mu^2}\right)-\sum_{k, j}g^2_k g_1^2 d_{ j}C^{(k)}_{ j}Y_i Y_{j}\tilde m^2_k\frac{1}{768\pi^4}\Big(\pi^2\\
&+6\log^2\frac{M^2}{\mu^2}\Big)
\label{eq:2loophypercharge}.
\end{align}
The sum over $j$ runs over the irreducible representations of the heavy scalars; the sum over $\hat i$ is over those of the light scalars, while the sum in $\hat j$ is taken over both light and heavy fields (however, when evaluating the threshold corrections at the scale $\mu=M$, only the light fields will contribute to the sum in $\hat j$). $k$ runs over the Standard Model gauge groups, and ${d}_j$ denotes the dimension of the representation $j$. Regarding counterterms and their insertion, the $\overline{\rm MS}$ scheme was implemented by redefining $\mu$ as $\mu\rightarrow e^\gamma(4\pi)^{-1}\mu$ and then performing minimal subtraction. The contributions in eq.~\eqref{eq:2loophypercharge} proportional to $M^2$ coincide with the corresponding results in ref.~\cite{Hisano:2000wy}.
The dominant contributions are those proportional to the prefactor $p^i\equiv\sum_{k,j}g^4_k C^{(k)}_iS^{(k)}_j$, since it includes terms that depend on the strong gauge coupling. In minimal and nonminimal Effective SUSY scenarios (denoted by MES and NMES) one has, respectively --this time neglecting the mixing angles of the heavy Higgs state, assuming it is mostly aligned with $H_d$:\footnote{Considering nonzero mixing angles between $H_u$ and $H_d$ modifies some diagrams, which become equivalent to vacuum integrals with three different masses in the propagators; these can be obtained from ref.~\cite{Davydychev:1992mt}. Since we are interested in the small $\alpha$ limit and since the heavy Higgs contributions are subdominant with respect to those from fields charged under SU(3), we will not provide the full expressions.}
\[p^i_{\rm MES}=9g_1^4C^{(1)}_i+5g_2^4C^{(2)}_i+\frac{9}{2}g_3^4C^{(3)}_i,\quad p^i_{\rm NMES}=\frac{43}{6}g_1^4C^{(1)}_i+\frac{9}{2}g_2^4C^{(2)}_i+4g_3^4C^{(3)}_i.\]
To recover the example from the end of the previous section, fixing $M=\mu=10\,{\rm TeV}$, $m_i=\tilde m_k=300\,{\rm GeV}$, $\cot\alpha=10$ in a nonminimal scenario, using eqs.~\eqref{eq:2loopmassive} and \eqref{eq:2loophypercharge} one gets
\begin{align*}
\nonumber&\delta m^2_{\tilde q_L}\sim-483^2 {\rm GeV}^2,\quad \delta m^2_{\tilde t_R}\sim -462^2 {\rm GeV}^2, \quad \delta m^2_{\tilde b_R}\sim -460^2 {\rm GeV}^2,\quad \delta m^2_{\tilde L}\sim -151^2 {\rm GeV}^2,\\
&\delta m^2_{\tilde e_R}\sim -71^2 {\rm GeV}^2,\quad \delta m^2_{\tilde H}\sim -151^2 {\rm GeV}^2
\end{align*}
where $m^2_{\tilde H}$ is the soft mass of the light Higgs field. Again, these numbers were obtained after computing the gauge couplings at the threshold scale as in ref.~\cite{Tamarit:2012ry}. Fig.~\ref{fig:2loopthresh} shows values of the 2 loop threshold corrections for $m^2_{\tilde q_L}$ as a function of the tree-level scalar mass and a common mass $\tilde m_g$ for the gauginos, for two different values of the heavy mass $M$. It is apparent that 2 loop corrections can be quite large and dominate over the one-loop contributions; also, heavy gauginos tend to enhance them.
\begin{figure}[t]\centering
\begin{minipage}{0.5\textwidth}
\includegraphics[width=8cm]{2loopthresholds5.eps}
\end{minipage}%
\begin{minipage}{0.5\textwidth}
\includegraphics[width=8cm]{2loopthresholds10.eps}
\end{minipage}
\caption{\label{fig:2loopthresh} Two-loop threshold corrections to $m^2_{\tilde q_L}$ in terms of its tree-level value and a common gaugino mass $\tilde m_g$, for heavy sparticles at 5 TeV (left) and 10 TeV (right).}
\end{figure}
\section{\label{sec:bounds}Tachyon bounds for squarks and gaugino masses}
It is known that the 2-loop renormalization group flow in the MSSM when the first and second generation sparticles are heavy may drive the light soft masses towards negative values, which would endanger the stability of the electroweak vacuum. Demanding the absence of tachyonic values for soft masses other than those of the Higgs allows to set lower bounds on the mass scales that set the boundary conditions for the RG flow at the SUSY breaking scale, which can be correlated with a lower bound on the amount of fine-tuning of the theory. As stated in the introduction, bounds were first calculated in ref.~\cite{ArkaniHamed:1997ab} using the RG MSSM equations in the $\overline{\rm DR}$ scheme; they were revisited in ref.~\cite{Tamarit:2012ry} after it was pointed out \cite{Tamarit:2012ie} that mass-independent schemes such as $\overline{\rm DR}$ or $\overline{\rm MS}$, being unphysical and not sensitive to mass thresholds, lack precision when large hierarchies in the masses are present, as in Effective SUSY
scenarios. Using RG equations implementing
decoupling, which effectively resum some of the large perturbative corrections, the tachyon bounds were shown to be substantially relaxed.
Now, all these calculations did not take into account finite threshold effects from the heavy particles, which have been shown here to be large and predominantly negative, so that they will force an increase of the bounds and demand more fine-tuning in the theories. In the same spirit as in ref.~\cite{Tamarit:2012ry}, bounds can be obtained by considering boundary conditions inspired by msugra and gauge mediation but allowing for a large hierarchy between the masses of the sparticles of the first two generations and those of the third generation. The msugra-inspired boundary conditions, set at a SUSY breaking scale $\Lambda_S$, are
\begin{align}
\label{eq:msugrabc}
\begin{array}{c}
\text{ minimal Effective SUSY}\\
\mu=\tilde m_1=\tilde m_2=\tilde m_3=m_F,\\
{ m^2_{q/u/d/l/e}}_{11}= { m^2_{q/u/d/l/e}}_{22}= {m^2_{d/l/e}}_{33}=\Lambda^2,\\
{ m^2_{q/u}}_{33}=m_s^2,\\
\frac{a_u}{y_t}=\frac{a_d}{y_b}=\frac{a_l}{y_\tau}=a_0,
\end{array}\quad\begin{array}{c}
\text{ nonminimal Effective SUSY}\\
\mu=\tilde m_1=\tilde m_2=\tilde m_3=m_F,\\
{ m^2_{q/u/d/l/e}}_{11}= { m^2_{q/u/d/l/e}}_{22}=\Lambda^2,\\
{ m^2_{q/u/d/l/e}}_{33}=m_s^2,\\
\frac{a_u}{y_t}=\frac{a_d}{y_b}=\frac{a_l}{y_\tau}=a_0,
\end{array}
\end{align}
while the ones resembling gauge mediation, also set at a scale $\Lambda_S$, are
\begin{align}
\label{eq:gmbc}
\begin{array}{c}
\text{minimal Effective SUSY}\\
\tilde m_i=g^2_i\Lambda_g,\\
{ m^2_{q/u/d/l/e}}_{11}={ m^2_{q/u/d/l/e}}_{22}={ m^2_{d/l/e}}_{33}=\lambda\frac{\Lambda_S^2}{16\pi^2},\\
{ m^2_{i}}_{33}= \Lambda^2_G\sum_k g_k^4 C^k_2(i),\,i=q,u,\\
a_u=a_d=a_l=0,
\end{array}\quad\begin{array}{c}
\text{nonminimal Effective SUSY}\\
\tilde m_i=g^2_i\Lambda_g,\\
{ m^2_{q/u/d/l/e}}_{11}={ m^2_{q/u/d/l/e}}_{22}=\lambda\frac{\Lambda_S^2}{16\pi^2},\\
{ m^2_{i}}_{33}= \Lambda^2_G\sum_k g_k^4 C^k_2(i),\\
a_u=a_d=a_l=0.
\end{array}
\end{align}
Regarding the boundary conditions of eq.~\eqref{eq:msugrabc}, figure \ref{fig:tachyonmsugra} shows the resulting lower bound in the mass parameter $m_s$ in terms of the scale $\Lambda_S$ for minimal and nonminimal Effective SUSY scenarios, using the MSSM $ \overline{\rm DR}$ RG equations without threshold contributions, the decoupled RG flow of ref.~\cite{Tamarit:2012ie} without thresholds, and finally the decoupled RG flow together with the threshold contributions presented in this paper applied at the scale at which the heavy sparticles are integrated out. $m_F$ was fixed at 1 TeV, the heavy scalars at 20 TeV, and $a_0$ at 0. If the boundary conditions of eq.~\eqref{eq:gmbc} are used, figure \ref{fig:tachyongm} shows analogous results for the lower bound of the soft mass $m^2_Q$ at the scale $\Lambda_S$ with respect to this scale, for two different values of $\lambda$; $\Lambda_g$ was kept at 1 TeV, and the choices of $\Lambda_S$ correspond to heavy sparticles between 10 and 20 TeV. In the literature, and
in spectrum calculators for SUSY scenarios, it is customary to compute physical sparticle masses at a scale near the stop mass in order to minimize theoretical errors; here for simplicity it was chosen to probe for tachyons at a scale of $500$ GeV.
\begin{figure}[h]\centering
\begin{minipage}{.5\textwidth}\centering
\includegraphics[scale=.9]{tachyonboundsMESconst.eps}
\end{minipage}\begin{minipage}{0.5\textwidth}\centering
\includegraphics[scale=.9]{tachyonboundsESconst.eps}
\end{minipage}
\caption{\label{fig:tachyonmsugra} Minimum value of the scalar mass $m_s$ needed to avoid tachyonic soft masses at 500 GeV in terms of the high scale $\Lambda_S$, in minimal (left) and nonminimal (right) Effective SUSY scenarios with the boundary conditions of eq.~\eqref{eq:msugrabc}, with heavy sparticles at 20 TeV. The upper blue dots correspond to the MSSM $ \overline{\rm DR}$ RG flow, the diamond-shaped marks represent the results with the flow implementing decoupling and including threshold effects, while the boxes denote the results when using the flow implementing decoupling but ignoring threshold effects.}
\end{figure}
\begin{figure}[h]\centering
\begin{minipage}{.5\textwidth}\centering
\includegraphics[scale=.9]{tachyonboundsMESGM.eps}
\end{minipage}\begin{minipage}{0.5\textwidth}\centering
\includegraphics[scale=.9]{tachyonboundsESGM.eps}
\end{minipage}\vskip3mm
\begin{minipage}{.5\textwidth}\centering
\includegraphics[scale=.9]{tachyonboundsMESGM_lambda100.eps}
\end{minipage}\begin{minipage}{0.5\textwidth}\centering
\includegraphics[scale=.9]{tachyonboundsESGM_lambda100.eps}
\end{minipage}
\caption{\label{fig:tachyongm} Minimum value of the boundary value of $(m^2_Q)^{1/2}$ needed to avoid tachyonic soft masses at 500 GeV in terms of the high scale $\Lambda_S$, in minimal (left) and nonminimal (right) Effective SUSY scenarios with the boundary conditions of eq.~\eqref{eq:gmbc}, for $\lambda=1$ (upper plots) and $\lambda=1/100$ (lower plots). The upper blue dots correspond to the MSSM $ \overline{\rm DR}$ RG flow, the diamond-shaped marks represent the results with the flow implementing decoupling and including threshold effects, while the boxes denote the results when using the flow implementing decoupling but ignoring threshold effects. The choices of $\Lambda_S$ correspond to heavy sparticles between 10 and 20 TeV.}
\end{figure}
The results show that the inclusion of the threshold effects in the decoupled RG analysis slightly increases the lower mass bounds obtained by demanding the absence of tachyons, but these bounds still remain well below the ones obtained with the MSSM $ \overline{\rm DR}$ RG flow without decoupling.
Another interesting set of boundary conditions concerns models with an Effective SUSY spectrum in which the light soft masses are generated through gaugino mediation, i.e., they arise through the RG effects of nonzero gaugino masses. Some examples can be found in refs.~\cite{Craig:2011yk,Craig:2012hc,Cohen:2012rm}, in which either deconstruction~\cite{Craig:2011yk,Craig:2012hc} or conformal sequestering \cite{Cohen:2012rm} are used to suppress light soft masses. The large, negative threshold effects that are the central subject of this paper may force unnatural fine-tuning in models of this type: integrating out the heavy sparticles produces large tachyonic contributions to the suppressed soft masses, which may not be compensated by the RG effects of gaugino masses unless these are unnaturally large. Again, one can obtain lower bounds for gaugino masses by using simplified boundary conditions. In the spirit of gaugino mediation with heavy first and second generation scalars, one may
consider the following ones at a scale $\Lambda_S$,
\begin{align}
\label{eq:gaugbc}
\begin{array}{c}
\text{minimal Effective SUSY}\\
\tilde m_i=g^2_i\Lambda_g,\\
{ m^2_{q/u/d/l/e}}_{11}={ m^2_{q/u/d/l/e}}_{22}={ m^2_{d/l/e}}_{33}=\lambda \frac{\Lambda_S^2}{16\pi^2},\\
{ m^2_{i}}_{33}=0,\,i=q,u,\\
a_u=a_d=a_l=0,
\end{array}\quad\begin{array}{c}
\text{nonminimal Effective SUSY}\\
\tilde m_i=g^2_i\Lambda_g,\\
{ m^2_{q/u/d/l/e}}_{11}={ m^2_{q/u/d/l/e}}_{22}=\lambda\frac{\Lambda_S^2}{16\pi^2},\\
{ m^2_{i}}_{33}= 0,\\
a_u=a_d=a_l=0.
\end{array}
\end{align}
The resulting minimum values of the gluino mass $\tilde m_3$ evaluated at 500 GeV are shown in figure \ref{fig:tachyongaugino} in terms of $\Lambda_S$ for minimal Effective SUSY scenarios, in the case $\lambda=1$. It is apparent that demanding no tachyonic charged/colored sparticles requires very heavy gluinos, at 2 TeV or heavier for heavy scalars at 10 TeV or above. Decreasing $\lambda$ implies raising the SUSY breaking scale for a fixed value of the heavy masses, which will only raise the bound on the gluino mass, as there will be more decades of MSSM RG running driving the light soft masses towards negative values (see for example fig.~\ref{fig:tachyongm}). The case of nonminimal scenarios is rather hopeless; this time the mass running deeper into negative values is $m^2_L$ , and the bounds for $\tilde m_3$ reach 8 TeV and higher --eventually crossing the heavy particle threshold, so that the analysis would have to be modified. Alternatively, one may fix the heavy masses at a fixed value, for example at
10 TeV with $\Lambda_g$ at 3 TeV, and probe $\lambda$ in order to obtain the maximum value allowed for the scale of SUSY breaking when demanding the absence of tachyonic masses at low scales; the resulting lower bound on $\Lambda_S$ is as low as 11 TeV.
\begin{figure}[h]\centering
\begin{minipage}{.5\textwidth}\centering
\includegraphics[scale=.9]{tachyonboundsMESGgaug.eps}
\end{minipage}
\caption{\label{fig:tachyongaugino} Minimum value of the gluino mass $\tilde m_3$ at $500$ GeV needed to avoid tachyonic soft masses at the same scale in terms of the high scale $\Lambda_S$, in minimal Effective SUSY scenarios with the boundary conditions of eq.~\eqref{eq:gaugbc} for $\lambda=1$. }
\end{figure}
\section{Summary and conclusions\label{sec:models}}
This paper presents results for finite threshold contributions to the soft masses of light scalars caused by loops involving heavy sparticles in Effective SUSY scenarios, and analyzes their influence in bounds for squarks and gauginos obtained by demanding the absence of tachyonic squarks and sleptons. In contrast with previous results in the literature, nonzero tree-level values for the soft masses of light squarks and gauginos were considered inside the two-loop diagrams contributing to the threshold corrections. It was shown that in the limit of degenerate heavy fields --possibly including a heavy Higgs mostly aligned with $H_d$-- the known one-loop corrections are mainly determined by the hypercharge coupling and small Yukawas and may be negative at the threshold scale of the heavy particles. In this limit in which the heavy fields couple to the light ones through small Yukawas, the two-loop diagrams are dominated by the contributions involving the gauge couplings, which were calculated ignoring
mixing
among the heavy states and considering nonzero tree-level masses for the light scalars and gauginos; the result is given in eqs.~\eqref{eq:2loopmassive} and \eqref{eq:2loophypercharge}.
These two-loop contributions turn out to be quite significant, and they take negative values at the scale of the heavy fields, thus invalidating the na\"ive intuition that the heavy fields could act as ``messengers'' of SUSY breaking for the light scalars. In nonminimal scenarios, for the soft mass of the left-handed third generation squark doublet, they range from around $-250^2\, {\rm GeV}^2$ to $-480^2 {\rm GeV}^2$ for heavy sparticles between 5 and 10 TeV, if light scalars and gauginos are kept at around $500$ GeV (see figure \ref{fig:2loopthresh})-- and thus should not be ignored when analyzing the properties of Effective SUSY models. Fig.~\ref{fig:2loopthresh} also shows that gaugino masses have a sizable impact in the threshold corrections, which become more negative for larger gaugino masses --a gluino mass of around 1 TeV enhances the threshold correction by 20\% or more. The dependence on the light scalar masses is weaker.
The large negative threshold corrections to the light soft masses add to the already known negative 2 loop effects in the RG flow due to the heavy sparticles, which endanger the stability of the electroweak vacuum and may give rise to charged or colored vacua, the avoidance of which forces a lower bound on the light soft masses at the SUSY breaking scale, which translates into a lower bound in fine-tuning. Tachyon bounds for squark masses were reanalyzed, taking into account the threshold effects and using an RG flow implementing decoupling. It was shown that the bounds are slightly increased, but the use of the decoupled RG flow still guarantees that the former remain lower than the ones obtained by using the MSSM $\overline{\rm DR}$ RG equations without integrating out heavy sparticles. This strengthens the case for the need of implementing decoupling in precision calculations in models with hierarchical spectra.
In the case of models in which the light soft masses arise from gaugino mediation, and are thus approximately zero at the SUSY breaking scale, by demanding again the absence of tachyonic squarks/sleptons one may obtain lower bounds for gaugino masses. It was shown that for simple boundary conditions in minimal Effective SUSY scenarios (assuming for example that the SUSY breaking scale is related to the scale of the heavy masses by a loop factor), these bounds require gluinos above 2 TeV for heavy squarks at 10 TeV or higher. In nonminimal scenarios the bound is rather more stringent, requiring gluinos above 8 TeV. Alternatively, if the SUSY scale is left to vary with heavy fields fixed at 10 TeV and the boundary value for $\tilde m_3$ fixed at 3 TeV, then the said scale has to be barely above the mass of the heavy fields. These constraints may be avoided in deconstructed SUSY breaking models in which the heavy and light scalars are charged under different gauge groups, as in refs.~\cite{Craig:2011yk,Craig:2012hc}, if the scale at which these groups are higgsed to the diagonal is below the mass of the heavy sparticles. This would imply the presence of new fields beyond the MSSM under the scale of the heavy sparticles, which would alter the RG flow.
Finally, it should be commented that the negative threshold contributions also affect the light Higgs fields, so that they might play an important role in electroweak symmetry breaking.
\section*{Acknowledgements}
The author wishes to thank the members of the Particle Physics group at Perimeter Institute for useful conversations. Research
at the Perimeter Institute is supported in part by the Government of Canada through
NSERC and by the Province of Ontario through MEDT. This work was financed in part by the Spanish Ministry of Science and Innovation through project FPA2011-24568.
|
{
"timestamp": "2012-12-05T02:04:14",
"yymm": "1206",
"arxiv_id": "1206.6140",
"language": "en",
"url": "https://arxiv.org/abs/1206.6140"
}
|
\section{Introduction}
The ground states of certain two-dimensional lattice Hamiltonians of a type first introduced by Kitaev\cite{kitaev03} can be used as quantum error-correcting codes known as surface codes. Quantum information can be stored and protected using these codes when they are defined on lattices with holes (defects).\cite{bravyi98} Fault-tolerant gates can then be carried out either transversally or by deforming the code in order to braid these defects while staying entirely within the code subspace.\cite{raussendorf07a,raussendorf07b,fowler09}
One downside to using the Kitaev surface codes, for which defects behave as Abelian anyons, is that to realize a universal set of fault-tolerant gates at least one gate using a resource costly ``magic state'' distillation process\cite{bravyi05} is required. The same is true for fault-tolerant quantum computation using the so-called color codes.\cite{bombin06,bombin07,bombin11,fowler11,landahl11} Nevertheless, quantum computation using these surface codes has a number of appealing features, notably the need for only nearest-neighbor gates between qubits in a two-dimensional array and high error thresholds, e.g. $\sim 1\%$ for the Kitaev surface code.\cite{raussendorf07a,raussendorf07b,fowler09,wang11}
Recently K\"onig, Kuperberg, and Reichardt\cite{koenig10} (KKR) outlined a method for fault-tolerant quantum computation using {\it non-Abelian} surface codes. These codes, which are defined mathematically in terms of the Turaev-Viro topological invariants for 3-manifolds,\cite{turaev92} can be viewed physically as ground states of Levin-Wen models,\cite{levin05} two-dimensional lattice models which generalize the Kitaev model.\cite{connection} These models can be used to realize so-called ``doubled'' versions of any consistent anyon theory, including theories of non-Abelian anyons for which braiding is universal for quantum computation. The simplest such universal anyons are the Fibonacci anyons. Here we refer to the corresponding Levin-Wen model as the Fibonacci Levin-Wen model and, following KKR,\cite{koenig10} refer to the ground states of this model as the Fibonacci code. As shown in Ref.~\onlinecite{koenig10}, when using the Fibonacci code, Fibonacci anyons can be associated with holes in the lattice subject to certain boundary conditions and proper initialization. These Fibonacci anyons can then be used to encode logical qubits and universal quantum computation can be carried out purely by braiding them,\cite{freedman02,bonesteel05,hormozi07} without the need for magic state distillation.
The Levin-Wen models are defined by a set of commuting vertex and plaquette projection operators which act on qubits (more generally, qudits) associated with the edges of a two-dimensional trivalent lattice. When using the ground states of these models as quantum codes it will be necessary to continually measure these vertex and plaquette operators in order to check for errors, which would then have to be corrected without disturbing the quantum information stored in the topological degrees of freedom of the code. For the Kitaev surface code, quantum circuits which can be used to measure these operators are known and are fairly straightforward.\cite{dennis02} For either an $n$-sided plaquette, or a vertex where $n$ edges meet, these measurement circuits each require a single initialized syndrome qubit which is measured after carrying out $n$ controlled-NOT (CNOT) gates.
\begin{figure*}[t]
\begin{center}
\includegraphics[scale=.32]{lattice_vertplaq.pdf}
\end{center}
\caption{(Color online) (a) Example of a trivalent lattice (in this case a honeycomb lattice) on which the Levin-Wen model can be defined. For the Fibonacci Levin-Wen model a qubit is associated with each edge. A particular state which satisfies the vertex constraint $Q_{\bf v} = 1$ on each vertex for this model is shown. Thick edges indicate qubits in the state $|1\rangle$, thin edges indicate qubits in the state $|0\rangle$. (b) Action of the vertex operator $Q_{\bf v}$ on the three qubits on the edges connected to a trivalent vertex, and of the plaquette operators $B^s_{\bf p}$ on the $2n$ qubits associated with an $n$-sided plaquette.} \label{lattice}
\end{figure*}
The simplicity of the quantum circuits used to measure the vertex and plaquette operators for the Kitaev surface code reflects the Abelian nature of this code. It is natural to ask how complex the quantum circuits need to be to measure the vertex and plaquette operators for the non-Abelian Fibonacci code. In this paper we present explicit quantum circuits for performing such measurements. These circuits are built in part out of smaller circuits which carry out unitary transformations which have been described both in KKR\cite{koenig10} and, in the context of entanglement renormalization, in Ref.~\onlinecite{koenig09}. Our goal here is to explicitly construct these circuits in terms of standard elements (Toffoli gates, CNOT gates and single-qubit rotations) in an attempt to quantify their complexity.
The purpose of this work is not to argue that non-Abelian surface codes are viable competitors to the Kitaev surface code. Indeed, we share the view of many in the field that quantum computation using the Kitaev surface code, given its clear advantages over other fault-tolerant quantum computation schemes, may well provide the best practical route to building a functioning quantum computer.\cite{fowler09,divincenzo09} Here our goal is the more modest one of making a first pass at determining the complexity of syndrome extraction for the significantly less well understood Fibonacci code, which we believe is of intrinsic interest in its own right. An additional goal of this work is to begin developing a ``dictionary'' for translating the mathematical structures which appear in general anyon theories into interesting quantum circuits, some of which we find require only a few qubits and might feasibly be carried out experimentally in the near future.
\section{Levin-Wen Models and the Fibonacci Code}
The Levin-Wen models\cite{levin05} are defined on two-dimensional trivalent lattices such as the hexagonal lattice shown in Fig.~\ref{lattice}(a). The degrees of freedom of the models are associated with lattice edges which can take on a finite number of labels. These labels can, in general, be oriented, meaning for each label $i$ there is a dual label $i^*$. If $i=i^*$ then the edge is unoriented. For the Fibonacci Levin-Wen model there are only two labels $0$ and $1$ and the edges are unoriented ($0=0^*$, $1 = 1^*$). Thus, for this model, as for the Kitaev surface code, we simply associate a qubit with each edge of the lattice. The two states of each qubit $|0\rangle$ and $|1\rangle$ then correspond to the two labels 0 and 1, respectively.
For a given trivalent lattice the Levin-Wen Hamiltonian has the form
\begin{eqnarray}
H = - \sum_{\bf v} Q_{\bf v} - \sum_{\bf p} B_{\bf p}.
\end{eqnarray}
Here $Q_{\bf v}$ and $B_{\bf p}$ are projection operators associated with the vertices (labeled ${\bf v}$) and plaquettes (labeled ${\bf p}$) of the lattice.
The vertex operator $Q_{\bf v}$ acts on the three qubits associated with the edges connected to vertex ${\bf v}$ and is diagonal in the standard $\{|0\rangle$, $|1\rangle\}$ basis. (Here we focus on the Fibonacci Levin-Wen model and so only consider the case when a single qubit is assigned to each edge.) If these qubits are in the states $|i\rangle$, $|j\rangle$ and $|k\rangle$ the result of applying $Q_{\bf v}$ is determined by the tensor $\delta_{ijk}$ (see Fig.~\ref{lattice}(b)) which, for the Fibonacci Levin-Wen model, is given by,
\begin{eqnarray}
\delta_{ijk} = \left\{\begin{array}{cl}
1 & {\rm if\ } ijk = 000,011,101,110,111\\
0 & {\rm otherwise.}
\end{array}\right.
\end{eqnarray}
The plaquette operator $B_{\bf p}$ is significantly more complex than $Q_{\bf v}$. For example, for a hexagonal plaquette, $B_{\bf p}$ acts on the six qubits on the edges of plaquette ${\bf p}$ in a way determined by the state of the six qubits on the edges connected to the plaquette. $B_{\bf p}$ is therefore a twelve-qubit interaction (in general a $2n$-qubit interaction for an $n$-sided plaquette). For the Fibonacci Levin-Wen model the precise form of the plaquette projection operator is,
\begin{eqnarray}
B_{\bf p} = \frac{1}{1+\phi^2}
\left(B_{\bf p}^0 + \phi B_{\bf p}^1\right),
\label{Eq:Bp}
\end{eqnarray}
where $B_{\bf p}^s$ for $s=0$ and 1 are plaquette operators associated with the label $s$ and $\phi = (\sqrt{5}+1)/2$ is the golden ratio. The action of $B_{\bf p}^s$ on an $n$-sided plaquette is shown in Fig.~\ref{lattice}(b) where,
\begin{eqnarray}
&&B^{s,i^\prime_1 i^\prime_2 \cdots i^\prime_{n-1}i^\prime_n}_{{\bf p},i_1 i_2 \cdots i_{n-1} i_n}(a_1 a_2 \cdots a_{n-1} a_n)\\
&&~~=
F^{a_1i_n i_1}_{si_1^\prime i_n^\prime}
F^{a_2i_1 i_2}_{si_2^\prime i_1^\prime}
\cdots
F^{a_{n-1} i_{n-2} i_{n-1}}_{si_{n-1}^\prime i_{n-2}^\prime}
F^{a_n i_{n-1} i_n}_{s i_n^\prime i_{n-1}^\prime}. \label{Eq:Bps}
\nonumber
\end{eqnarray}
Here the six-indexed tensor $F^{ijk}_{lmn}$, along with $\delta_{ijk}$, forms the basic data of a so-called tensor category --- the mathematical framework for a general anyon theory, in this case the theory of Fibonacci anyons. The $F$ and $\delta$ tensors satisfy certain self-consistency conditions which, among other things, guarantee that the operators $B_{\bf p}^s$ and $Q_{\bf v}$ all commute with each other.\cite{levin05,bp0} Note that since the Fibonacci Levin-Wen model is unoriented, in (\ref{Eq:Bps}) we have assumed $i= i^*$ for all labels. The precise form of the $F$ tensor for this model is given in Sec.~\ref{fmove_sec}.
When using the ground states of the Levin-Wen model as quantum error-correcting codes the commuting vertex and plaquette projection operators $Q_{\bf v}$ and $B_{\bf p}$ should be viewed as stabilizers. The code space is then defined by the requirement that $Q_{\bf v} = 1$ on each vertex and $B_{\bf p} = 1$ on each plaquette. For the Fibonacci code the constraint $Q_{\bf v} = 1$ projects the Hilbert space onto the space spanned by states in which edges in the state $|1\rangle$ form branching loop configurations (see Fig.~\ref{lattice}(a)), while the plaquette constraint $B_{\bf p} = 1$ leads to particular quantum superpositions of these states. As described in KKR,\cite{koenig10} when these code states are defined on lattices with holes that have certain boundary conditions on their edges, these holes (or defects) can realize a ``doubled'' version of the anyon theory characterized by the $F$ and $\delta$ tensors. For the Fibonacci code this means that these defects can encode two types of Fibonacci anyons with opposite chiralities. As further shown in KKR,\cite{koenig10} with proper initialization these defects can be forced to encode Fibonacci anyons of a particular chirality. These chiral anyons can then be used to encode qubits and braided in order to carry out universal quantum computation.
In this paper we focus on the problem of how to measure the stabilizers $Q_{\bf v}$ and $B_{\bf p}$ for the Fibonacci code. In the passive approach to the Levin-Wen model envisioned in KKR,\cite{koenig10} rather than engineering the Levin-Wen Hamiltonian to realize the Fibonacci code it will be necessary to continually measure these operators in order to detect errors which can then be corrected.
\section{Quantum Circuit to Measure $Q_{\bf v}$}
\label{Sec:Qv}
The measurement of $Q_{\bf v}$ for the Fibonacci code is straightforward and not significantly more difficult to carry out than the analogous measurement for the Kitaev surface code. A quantum circuit which carries out a quantum non-demolition measurement of $Q_{\bf v}$ is shown in Fig.~\ref{Qv}. The circuit acts on the three qubits associated with a given vertex as well as a fourth syndrome qubit initialized in the state $|0\rangle$. After carrying out the circuit the syndrome qubit is measured. If it is found to be in the state $|0\rangle$ then $Q_{\bf v} = 1$ for this vertex and the vertex constraint is satisfied; if not, then $Q_{\bf v} = 0$ and the vertex constraint is violated.
The most difficult part of the $Q_{\bf v}$ circuit to carry out is likely to be the four-qubit Toffoli gate which performs a NOT gate on the syndrome qubit if and only if the state of each of the three vertex qubits is $|1\rangle$. (Here and throughout it should be understood that an $n$-qubit Toffoli gate is a gate with $n-1$ control qubits and one target qubit.) This four-qubit Toffoli gate is the first of several $n$-qubit Toffoli gates required in our constructions, all of which are directly related to the non-Abelian nature of the Fibonacci code. Here this gate is needed to allow for the loop branching associated with the fact that $\delta_{111} = 1$.
\begin{figure}[t]
\centerline{
\includegraphics[scale=.4]{Qv_circuit.pdf}
}
\
\caption{(Color online) Quantum circuit which can be used to measure $Q_{\bf v}$ for the Fibonacci code.} \label{Qv}
\end{figure}
In what follows we will be interested in quantifying the complexity of the quantum circuits we construct. Of course the notion of quantum circuit complexity is somewhat ill-defined and depends, among other things, on what we take as our primitive gate set. This in turn will depend on the particular hardware of the quantum computer being considered.
Accurate three-qubit Toffoli-class gates have recently been carried out experimentally using superconducting qubits\cite{mariantoni11,fedorov12,reed12} and trapped ions.\cite{monz09} Motivated by this, we take one primitive gate set to consist of three-qubit Toffoli gates, CNOT gates and single-qubit rotations. An $n$-qubit Toffoli gate can then be carried out using $4n-12$ three-qubit Toffoli gates if $n-3$ additional qubits are available.\cite{barenco95_1} These additional qubits need not be initialized and their states are left unchanged once the full $n$-qubit Toffoli gate is carried out. Thus nearby code qubits which are not being acted on directly by the operator under measurement can be used. With this construction we can count the total number of three-qubit Toffoli gates (or, simply, Toffoli gates), CNOT gates and single-qubit rotations required to carry out a given circuit. For the case of the four-qubit Toffoli gate appearing in our $Q_{\bf v}$ circuit this count gives $4$ Toffoli gates. The total gate count for our $Q_{\bf v}$ circuit is then 4 Toffoli gates and 3 CNOT gates. This can be contrasted with the analogous circuit for the Kitaev surface code which, when acting on a trivalent vertex, would require only 3 CNOT gates (it is, in fact, identical to the circuit shown in Fig.~\ref{Qv} with the four-qubit Toffoli gate removed).\cite{dennis02}
For a second gate count we assume that the $n$-qubit Toffoli gates which appear in our circuits are themselves primitive gates. By this count, our $Q_{\bf v}$ circuit consists of 1 four-qubit Toffoli gate and 3 CNOT gates. We note that there are proposals for carrying out single-step $n$-qubit Toffoli-class gates using trapped ions,\cite{cirac95} superconducting qubits,\cite{lin06} and neutral atoms interacting with cavity photons;\cite{duan05} in addition, it has been observed that these gates are efficiently achieved if one of the qubits has $n$ available quantum levels.\cite{ralph07} Of course $n$-qubit Toffoli gates can also be simulated using the usual primitive gate set consisting of CNOT gates and single-qubit rotations.\cite{toffoli} However, as we have seen with our $Q_{\bf v}$ measurement circuit, and as will become more clear in what follows, the ability to directly carry out accurate $n$-qubit Toffoli gates (with $n = 3,4$ and 5) will give a strong advantage when carrying out quantum computation using the Fibonacci code.
\begin{figure}[t]
\begin{center}
\includegraphics[scale=.35]{f_move_lattice.pdf}
\end{center}\caption{(Color online) (a) An $F$-move, a five-qubit unitary operation defined in terms of the tensor $F^{abe}_{cde^\prime}$. (b) Action of an $F$-move on the abstract trivalent lattice of the Fibonacci code which illustrates the decoupling of this lattice from the physical qubits. In this example the qubits (open circles) are arranged in a Kagome lattice and lie on the edges of an initial trivalent (hexagonal) lattice. After the $F$-move the edges of the new trivalent lattice must be distorted if they are forced to coincide with the physical qubit lattice.} \label{f_lattice}
\end{figure}
\begin{figure*}
\begin{center}
\includegraphics[scale=.35]{f_move_fibonacci.pdf}
\end{center}
\caption{(Color online) $F$-move for Fibonacci anyons. Under this $F$-move a unitary transformation is performed on the qubit associated with the edge which goes from horizontal to vertical conditioned on the state of the qubits on the other four edges. As in Fig.~\ref{lattice} thick lines indicate edges in the state $|1\rangle$ and thin lines indicate edges in the state $|0\rangle$. Only those states which satisfy the $Q_{\bf v}= 1$ constraint are shown.} \label{f_move}
\end{figure*}
Despite requiring a four-qubit Toffoli gate, the $Q_{\bf v}$ measurement circuit shown in Fig.~\ref{Qv} is relatively simple, reflecting the simplicity of the vertex operator. In what follows we turn to the more difficult problem of measuring the plaquette operator $B_{\bf p}$. For this case a brute force approach to constructing a circuit which measures the appropriate operator acting on the edges of a plaquette for each possible state of the edges connected to that plaquette is problematic. Fortunately, there is a useful resource which simplifies the problem greatly --- the $F$-move.
\section{$F$-Move}
\label{fmove_sec}
When using the Fibonacci code, the physical qubits of a quantum computer may be fixed in space and may even form a rigid lattice. However, this physical lattice need not be the same as that formed by the edges of the abstract trivalent lattice used to define the code. Indeed, as emphasized in KKR,\cite{koenig10} this abstract trivalent lattice should be thought of as fluid and constantly changing throughout the computation. These changes are accomplished by carrying out $F$-moves, processes which locally redraw the trivalent lattice while reassigning the physical qubits to new lattice edges and carrying out an appropriate unitary operation.
Specifically, when carrying out an $F$-move five edges of the lattice are redrawn as shown in Fig.~\ref{f_lattice}(a) while a unitary transformation determined by the six-indexed tensor $F^{abe}_{cde^\prime}$ (the same $F$ tensor which appears in (\ref{Eq:Bps})) is applied to the five qubits associated with these edges. This five-qubit unitary is a controlled operation on the qubit labeled $e$ in Fig.~\ref{f_lattice}(a) contingent on the states of the other four qubits (labeled $abcd$). The usefulness of the $F$-move here derives from the fact that if one starts in a ground state of a given Levin-Wen model on a particular trivalent lattice then, after performing an $F$-move, the resulting state will be a ground state of the new Levin-Wen model defined on the new trivalent lattice.\cite{koenig09} This is true even though this lattice has decoupled from the physical qubits, as illustrated in Fig.~\ref{f_lattice}(b).
It was shown in KKR\cite{koenig10} that the ability to decouple the abstract trivalent lattice from the physical qubits with $F$-moves is an important resource for carrying out quantum computation using the Fibonacci code. For example, by carrying out sequences of $F$-moves one can deform the code to perform Dehn twists on the trivalent lattice which can then be used to braid defects encoding Fibonacci anyons.\cite{koenig10} Since the braiding of Fibonacci anyons is universal for quantum computation, this means that one can perform a universal set of gates while staying inside the Fibonacci code subspace without the need for magic state distillation.
The $F$-move for the Fibonacci code is represented graphically in Fig.~\ref{f_move}. This figure, together with Fig.~\ref{f_lattice}(a), can serve as a definition of the $F$ tensor for Fibonacci anyons. The effect of carrying out an $F$-move is only shown for those states which satisfy the vertex constraint (i.e. for which $Q_{\bf v}$ = 1 for all vertices). When defining the Levin-Wen models, the $F$ tensor is assumed to vanish when acting on those states which violate the vertex constraint.\cite{levin05} Here we will assume before applying any $F$-move that it has been verified that $Q_{\bf v} = 1$ on each relevant vertex of the initial trivalent lattice. The structure of the $F$-move then guarantees that the vertex constraint will continue to be satisfied on the new trivalent lattice.
\begin{figure}[b]
\begin{center}
\includegraphics[scale=.4]{f_circuit_long.pdf}
\end{center}\caption{(Color online) (a) Quantum circuit which carries out an $F$-move for the Fibonacci code (the $2\times 2$ matrix $F$ is given in Eq.~\ref{fmatrix}). The labels $abcde$ refer to the same labels in Fig.~\ref{f_lattice}(a). (b) Five-qubit controlled-$F$ gate expressed in terms of a five-qubit Toffoli gate. Here $R(\pm \theta\hat y) = e^{\pm i\theta \sigma_y/2}$ are single-qubit rotations about the $y$ axis with $\theta = \tan^{-1} \phi^{-1/2}$ for which $R(\theta \hat y)XR(-\theta \hat y) = F$.} \label{f}
\end{figure}
A quantum circuit which acts on five qubits at a time and which carries out the $F$-move defined in Fig.~\ref{f_move} for those states satisfying the vertex constraint is shown in Fig.~\ref{f}. In this figure the labels $abcde$ refer to the same labels shown in Fig.~\ref{f_lattice}(a). Although it is not immediately apparent from its structure, one can readily check that this circuit has the symmetries of the $F$ tensor\cite{levin05} (e.g., $F^{abe}_{cde^\prime} = F^{cde}_{abe^\prime} = F^{bae}_{dce^\prime}$). Note also that the circuit squares to 1 (since $F^2 = 1$, see below), so the same circuit can be used for the inverse transformation. As described above, this $F$ circuit carries out a particular operation on the qubit labeled $e$ depending on the state of the other four qubits labeled $abcd$ which are themselves left unchanged at the end of the circuit. The $F$ circuit can therefore be viewed as a generalized Toffoli-class gate. Because the four control qubits are not equivalent, it is important to label these qubits in our $F$ circuit as we have done in the green box in Fig.~\ref{f_move}. This notation will be useful when we embed the $F$ circuit into larger circuits acting on more than five qubits.
\begin{figure*}[t]
\begin{center}
\includegraphics[scale=.35]{pentagon_eq_long.pdf}
\end{center}
\caption{(Color online) (a) The pentagon equation, a self-consistency condition which the $F$-move must satisfy. As shown here, the pentagon equation corresponds to a series of $F$-moves which take a particular 7 edged lattice (upper left) back to an identical lattice (lower left) while two of the qubits associated with the lattice edges are swapped. Here and in subsequent figures the edges associated with the initial state before each $F$-move are color coded as in Fig.~\ref{f_lattice}. (b) The pentagon equation as a quantum circuit identity. The sequence of $F$-moves shown in (a) are carried out by repeatedly applying the $F$ circuit defined in Fig.~\ref{f}. The labels $abcde$ in each green box refer to the labels in Fig.~\ref{f}. The circuit equality holds provided the vertex constraint $Q_{\bf v} = 1$ is satisfied on all three vertices in the initial lattice. In the figure, the triplets of numbers given below ``$Q_{\bf v} = 1$'' in the red box indicate the qubits which meet at these vertices.} \label{pentagon}
\end{figure*}
\begin{figure}[b]
\centerline{\includegraphics[scale=.45]{pentagon_simple.pdf}}
\caption{(Color online) Simple two-qubit circuit identity obtained by setting the five effective control qubits (qubits 1,2,3,4, and 7) in the pentagon circuit identity shown in Fig.~\ref{pentagon}(b) to the state $|1\rangle$.} \label{pentagon_simple}
\end{figure}
At the heart of the $F$ circuit is the five-qubit controlled-$F$ gate where $F$ is the $2\times 2$ unitary matrix acting on qubit $e$ when $a=b=c=d=1$,
\begin{eqnarray}
F = \left(\begin{array}{cc} \phi^{-1} & \phi^{-1/2} \\
\phi^{-1/2} & - \phi^{-1}
\end{array}\right).
\label{fmatrix}
\end{eqnarray}
The remaining Toffoli gate and CNOT gates take care of all other cases for which the outcome is essentially fixed by the vertex constraint. As stated above, this circuit is designed to carry out an $F$-move only on those states which satisfy the $Q_{\bf v}= 1$ constraint on all vertices. In what follows we will always assume it has been verified that the vertex constraint is satisfied before applying the $F$ circuit.
Figure \ref{f}(b) shows how to carry out the five-qubit controlled-$F$ gate using a five-qubit Toffoli gate and two single-qubit rotations. This simple construction is possible because $F^2 = 1$ and $\det F = -1$. As for the four-qubit Toffoli gate appearing in the measurement circuit for $Q_{\bf v}$, the appearance of this five-qubit Toffoli gate can be traced back to the fact that loops are allowed to branch in the Fibonacci code and is a direct consequence of the non-Abelian nature of this code. Using the construction of Ref.~\onlinecite{barenco95_1} described above this five-qubit Toffoli gate can be carried out using 8 conventional Toffoli gates. The total gate count for our $F$ circuit is then 9 Toffoli gates, 4 CNOT gates and 2 single-qubit rotations. Alternatively, if we treat $n$-qubit Toffoli gates as primitives, the gate count is 1 five-qubit Toffoli gate, 1 Toffoli gate, 4 CNOT gates and 2 single-qubit rotations. Given the importance of carrying out $F$-moves when using the Fibonacci code,\cite{koenig10} the ability to accurately carry out this five-qubit Toffoli gate can be viewed as an important experimental threshold for realizing this type of quantum computation.
\section{Pentagon Equation}
\label{Sec:pentagon}
The $F$-move satisfies an important self-consistency condition known as the pentagon equation. The pentagon equation can be represented as a sequence of $F$-moves on a seven-edged trivalent lattice as shown in Fig.~\ref{pentagon}(a). In a quantum computer, the lattice edges would be associated with qubits, labeled 1 through 7 in Fig.~\ref{pentagon}(a). As one follows this sequence of $F$-moves, the trivalent lattice is repeatedly redrawn while the qubits, which can be considered fixed in physical space, are reassigned to the new lattice edges after each $F$-move. By the time one has gone all the way around the pentagon the trivalent lattice has returned to its original form. However, the qubits associated with two of the edges (labeled $5$ and $6$ in the figure) are swapped.
The process of carrying out this sequence of five $F$-moves and the resulting qubit swap can be translated into the quantum circuit identity shown in Fig.~\ref{pentagon}(b). We refer to the left-hand side of this identity as the pentagon circuit. The solid green rectangles in the pentagon circuit represent the five-qubit $F$ circuit shown in Fig.~\ref{f} and the corresponding $abcde$ labels are the same as the labels shown in Fig.~\ref{f}. Again we assume that before carrying out the pentagon circuit it has been verified that $Q_{\bf v} = 1$ on each of the two vertices of the initial trivalent lattice. It is only for this case that the circuit identity shown in Fig.~\ref{pentagon}(b) holds (for clarity these vertices are labeled by their associated qubits inside the red box under the equals sign in this figure).
In the pentagon circuit two of the qubits (qubits $5$ and $6$) are acted on while the remaining qubits play the role of control qubits. Simpler quantum circuits can be constructed by fixing these five effective control qubits to be in a particular state. For example, if we fix all the qubits except for $5$ and $6$ to be in the state $|1\rangle$ then the pentagon circuit reduces to the simple two-qubit circuit shown on the left-hand side of the circuit identity in Fig.~\ref{pentagon_simple}. This simplified pentagon circuit consists of five controlled-$F$ gates with alternating control qubits, and the net effect of this sequence of gates is a SWAP gate. Note that when qubits 5 and 6 are both in the state $|0\rangle$ and all other qubits are in the state $|1\rangle$ the vertex constraint is violated in the full seven-qubit pentagon circuit. However, in this case the simplified pentagon circuit merely carries out the identity operation, which is consistent with swapping the two qubits. Therefore the expression shown in Fig.~\ref{pentagon_simple} is an exact circuit identity, regardless of the vertex constraint.
We note the resemblance of this circuit identity to the familiar three CNOT construction of the SWAP gate.\cite{feynman85,barenco95_2} In our case, the circuit identity shown in Fig.~\ref{pentagon_simple} represents the nontrivial part of the pentagon equation which uniquely fixes the form of the matrix $F$ (up to an arbitrary and irrelevant phase choice for the off-diagonal matrix elements). We envision that this circuit identity may be useful for calibrating the $F$ operation. For example, one can imagine tuning $F$ until it can be verified by quantum process tomography that five controlled-$F$ gates with alternating control qubits indeed produce a SWAP gate.
\begin{figure*}[t]
\begin{center}
\includegraphics[scale=.37]{plaquette_to_tadpole.pdf}
\end{center}
\caption{(Color online) Reduction of a hexagonal plaquette to a tadpole through a sequence of $F$-moves.} \label{plaquette_reduction}
\end{figure*}
\section{Quantum Circuit to Measure $B_{\bf p}$}
We now turn to constructing a quantum circuit to measure the plaquette operator $B_{\bf p}$. To do this we use a method inspired by the entanglement renormalization scheme of Ref.~\onlinecite{koenig09}. The essential idea is that through a sequence of $F$-moves any $n$-sided plaquette can be reduced to a 1-sided plaquette with a single external line, i.e. a ``tadpole.'' One such sequence of $F$-moves which reduces a hexagonal plaquette to a tadpole is shown in Fig.~\ref{plaquette_reduction}. Note that the final $F$-move in this sequence acts on four qubits rather than five. A quantum circuit which carries out this reduced $F$-move, obtained by identifying the qubits labeled $a$ and $d$ in the circuit shown in Fig.~\ref{f}, is shown in Fig.~\ref{fprime}. (Gate counts for this reduced $F$ circuit: 5 Toffoli gates, 4 CNOT gates, and 2 single-qubit rotations, or 1 four-qubit Toffoli gate, 1 Toffoli gate, 4 CNOT gates, and 2 single-qubit rotations.)
It was shown in Ref.~\onlinecite{koenig09} that the plaquette operator $B_{\bf p}$ commutes with $F$-moves, i.e. after each $F$-move shown in Fig.~\ref{plaquette_reduction} the value of $B_{\bf p}$ is unchanged even as the plaquette size is reduced. This is equivalent to the statement that if we start with a plaquette in a ground state of the Levin-Wen model (meaning $Q_{\bf v} = 1$ on each vertex and $B_{\bf p} = 1$ for the plaquette) then, after each $F$-move, the qubits will continue to be in the ground state of the Levin-Wen model for the new lattice. Thus, after each $F$-move, it will still be true that $Q_{\bf v} = 1$ on each vertex and $B_{\bf p} = 1$ on the reduced plaquette. This means that after performing the ``disentangling'' reduction of the $n$-sided plaquette to a tadpole one need only measure $B_{\bf p}$ for the tadpole to measure $B_{\bf p}$ for the original plaquette. Since the tadpole only consists of two qubits this measurement is straightforward.
\begin{figure}[b]
\centerline{\includegraphics[scale=.4]{fprime_circuit.pdf}}
\caption{(Color online) Reduced four-qubit $F$-move obtained by identifying the qubits labeled $a$ and $d$ in Fig.~\ref{f}.} \label{fprime}
\end{figure}
\begin{figure}[b]
\centerline{\includegraphics[scale=.35]{s_circuit.pdf}}
\caption{(Color online) (a) $S$ transformation acting on a two-qubit tadpole. The tensor $S^a_{bb^\prime}$ is defined in the text. (b) $S$ circuit which carries out an $S$ transformation. The $2\times 2$ matrix $S$ is given in Eq.~\ref{smatrix}. Here $R(\pm\rho\hat y) = e^{\pm i\rho\sigma_y/2}$ are single-qubit rotations about the $y$ axis with $\rho = \tan^{-1} \phi^{-1}$ for which $R(\rho \hat y)XR(-\rho \hat y) = S$. (c) Quantum circuit which uses the $S$ circuit to measure $B_{\bf p}$ for a two-qubit tadpole.}\label{s}
\end{figure}
\begin{figure*}[t]
\begin{center}
\includegraphics[scale=.55]{Bp_circuit_long.pdf}
\end{center}
\caption{(Color online) Quantum circuit which can be used to measure $B_{\bf p}$ for the Fibonacci code on a hexagonal plaquette based on the plaquette reduction shown in Fig.~\ref{plaquette_reduction}. It must be verified that $Q_{\bf v} = 1$ on each of the six vertices of the plaquette before carrying out the circuit.} \label{Bp}
\end{figure*}
For a tadpole there is a unique eigenstate of $B_{\bf p}$ with eigenvalue 1,\cite{koenig09}
\begin{eqnarray}
|\psi_{B_{\bf p} = 1}\rangle = |0\rangle(|0\rangle + \phi|1\rangle)/\sqrt{1+\phi^2}.
\end{eqnarray}
Here the first qubit is the external line (tail of the tadpole) and the second qubit is the 1-sided plaquette (head of the tadpole). The two-dimensional Hilbert space of states orthogonal to $|\psi_{B_{\bf p} = 1}\rangle$ which satisfy the vertex constraint will then have $B_{\bf p} = 0$. This space is spanned by the states
\begin{eqnarray}
|\psi_{B_{\bf p} = 0},a\rangle &=& |0\rangle(\phi|0\rangle - |1\rangle)/\sqrt{1+\phi^2},\label{bp2}\\
|\psi_{B_{\bf p} = 0},b\rangle &=& |1\rangle |1\rangle.\label{bp3}
\end{eqnarray}
To measure $B_{\bf p}$ for this simple two-qubit system we first rotate the head qubit of the tadpole so that it is in the state $|0\rangle$ if $B_{\bf p} = 1$ and in the state $|1\rangle$ if $B_{\bf p} = 0$. This can be done by carrying out a single-qubit rotation $S$ on the head qubit if and only if the state of the tail qubit is $|0\rangle$, where\cite{modular}
\begin{eqnarray}
S = \frac{1}{\sqrt{1+\phi^2}}\left(\begin{array}{cc} 1 & \phi \\ \phi & -1 \end{array}\right).
\label{smatrix}
\end{eqnarray}
This transformation corresponds to the diagram shown in Fig.~\ref{s}(a) and is defined in terms of the tensor $S^{a}_{bb^\prime}$ which is equal to the matrix $S$ when $a=0$ and for which $S^{1}_{11} = 1$ (the case $S^{1}_{bb^\prime}$ with $b=0$ or $b^\prime=0$ violates the vertex constraint). A quantum circuit which carries out this transformation (and its inverse since the circuit squares to 1) is shown in Fig.~\ref{s}(b). This circuit can be carried out with 1 CNOT gate and 2 single-qubit rotations. Like the $F$ circuit, this simple construction is possible because $S^2 = 1$ and $\det S = -1$. If the two tadpole qubits are initially in the state $|\psi_{B_{\bf p} = 1}\rangle$ the result of carrying out this circuit is the state $|0\rangle |0\rangle$. If the two tadpole qubits are initially in the two-dimensional Hilbert space spanned by the states $\{|\psi_{B_{\bf p}=0},a\rangle, |\psi_{B_{\bf p}=0},b\rangle\}$ then after carrying out this circuit they will be in the space spanned by the states $\{|0\rangle |1 \rangle,|1\rangle |1 \rangle\}$. In either case the state of the second qubit, i.e. the rotated head of the tadpole, will be equal to $1-B_{\bf p}$.
After carrying out the $S$ circuit on the tadpole, a CNOT gate can be done with the head qubit as the control qubit and a syndrome qubit, initialized to the state $|0\rangle$, as the target qubit. The syndrome qubit can then be measured and if the result is $0$ then $B_{\bf p} = 1$ for the tadpole (and hence for the original plaquette), and if the result is $1$ then $B_{\bf p} = 0$.
After measuring $B_{\bf p}$ for the tadpole, the final step is to reconstruct the full plaquette. This can be done by undoing the $S$ circuit on the tadpole and then undoing the $F$-moves. Putting everything together the resulting measurement circuit for the case of a hexagonal plaquette is the palindromic circuit shown in Fig.~\ref{Bp}. In this circuit the notation is the same as in the pentagon circuit, with each box corresponding to either the full or reduced $F$ circuit, or the $S$ circuit, and the letters labeling the various ``inputs'' as defined in Figs.~\ref{f},\ref{fprime}, and \ref{s}. From the structure of the circuit it is clear how this construction generalizes to the case of an arbitrary $n$-sided plaquette.
We again emphasize that the circuit shown in Fig.~\ref{Bp} only measures $B_{\bf p}$ correctly if the vertex constraint $Q_{\bf v} = 1$ is satisfied on each vertex of the initial plaquette at the start of the circuit. If the vertex constraint is violated on any of these vertices then by definition $B_{\bf p} = 0$ for the plaquette;\cite{levin05} but the circuit will, in some cases, give the wrong result of $B_{\bf p} = 1$. To see this, consider the action of this circuit on the full $2^{2n}$-dimensional Hilbert space of the $2n$ qubits associated with an $n$-sided plaquette, including those states which violate the vertex constraint. From the structure of the circuit, which performs a unitary transformation on $2n$ qubits and then measures the state of a single qubit to determine $B_{\bf p}$, it is clear that the dimensionalities of the Hilbert spaces for which $B_{\bf p} = 1$ or $B_{\bf p} = 0$ would both be $2^{2n-1}$, i.e. half that of the full Hilbert space. However, once the vertex constraint is taken into account the Hilbert space is greatly reduced. The dimensionalities of the projected Hilbert spaces for which $Q_{\bf v} = 1$ on each of the $n$ vertices and $B_{\bf p}= 1$ or $B_{\bf p} = 0$ for the plaquette are ${\rm Dim}[B_{\bf p} = 1] = F_{2n-1}$ and ${\rm Dim}[B_{\bf p} = 0] = F_{2n+1}$, respectively, where $F_n$ is the $n$th Fibonacci number ($F_{0} = 0$, $F_1 = 1$, $F_2 = 1$, $F_3 = 2$, etc.). For the case of a hexagonal plaquette this means the full $4096 = 2^{12}$ dimensional Hilbert space of twelve qubits is projected down to a space of dimensionality $322 = F_{11}+F_{13} = 89 + 233$ with an 89 dimensional space of states satisfying the plaquette constraint with $B_{\bf p} = 1$. The reader will be reassured to know we have numerically checked that the circuit shown in Fig.~\ref{Bp} performs the correct measurement of $B_{\bf p}$ on this projected space.
\begin{figure*}[t]
\begin{center}
\includegraphics[scale=.35]{sf_lattice_long.pdf}
\end{center}
\caption{(Color online) (a) Sequence of $F$-moves which pulls a tadpole through a line. (b) Four-qubit quantum circuit which initializes a tadpole with an $S$ circuit, carries out the sequence of two $F$-moves shown in (a), and then performs another $S$ circuit so that measuring qubit 3 would yield $B_{\bf p}$ for the new tadpole. The tadpole is initialized to a state with $B_{\bf p} = 1$ or $0$ depending on whether the initial state of qubit 4 is $|0\rangle$ or $|1\rangle$, respectively. The circuit equality holds provided $Q_{\bf v} = 1$ on the vertices of the initial lattice. As in Fig.~\ref{pentagon}(b) these vertices are labeled inside the red box. The $2\times 2$ matrix $U$ is given by Eq.~\ref{umatrix} in the text.} \label{sf_circuit}
\end{figure*}
It should be noted that the requirement that $Q_{\bf v} = 1$ on each vertex before measuring $B_{\bf p}$ may cause problems when extracting error syndromes. For example, if a faulty measurement of $Q_{\bf v}$ gives 1 for a particular vertex on a plaquette, but the actual value of $Q_{\bf v}$ is 0 for that vertex, then, as described above, the $B_{\bf p}$ measurement circuit for the plaquette will, in some cases, give $B_{\bf p} = 1$ even though the correct value (as it is for any plaquette in which a vertex constraint is violated) is $B_{\bf p} = 0$. In this paper we have not addressed the important question of whether it is possible to extract error syndromes for the Fibonacci code fault tolerantly, nor the question of precisely how these errors would be corrected. Our goal has been to construct circuits which, in the absence of errors, can be used to measure $Q_{\bf v}$ and $B_{\bf p}$ in order to begin to get a measure of their complexity.
We can now give our final gate counts for measuring $B_{\bf p}$. If we choose to reduce all $n$-qubit Toffoli gates to conventional three-qubit Toffoli gates (using $4n - 12$ Toffoli gates, following Ref.~\onlinecite{barenco95_1} as described in Sec.~\ref{Sec:Qv}) then we find that our procedure for an $n$-sided plaquette (with $n\ge 2$) requires $18n - 26$ Toffoli gates, $8n-5$ CNOT gates and $4n$ single-qubit rotations. Alternatively, if we consider $n$-qubit Toffoli gates as primitives, then our procedure requires $2n-4$ five-qubit Toffoli gates, 2 four-qubit Toffoli gates, $2n-2$ Toffoli gates, $8n-5$ CNOT gates and $4n$ single-qubit rotations.\cite{2qbgates} Not surprisingly, this is significantly more demanding than the analogous requirement for the Kitaev surface code, for which only $n$ CNOT gates are needed to measure the plaquette operator for an $n$-sided plaquette.
Finally we note that there are, of course, many different ways to reduce a given plaquette to a tadpole using $F$-moves, all of which can be used to measure $B_{\bf p}$ and some of which will be more ``parallelizable'' than others.
\section{A Simple Example}
One of the motivations of the present work is to find simple quantum circuits which might feasibly be carried out in the near term and which begin to test some of the key properties of the Fibonacci code. We have already seen one example of such a circuit, the sequence of five controlled-$F$ gates which results in a two-qubit SWAP gate discussed in Sec.~\ref{Sec:pentagon}. This circuit is a simplified version of the full seven-qubit pentagon circuit shown in Fig.~\ref{pentagon}(b) and can potentially be used to calibrate the $F$ operation. In this section we give a similar example --- a four-qubit circuit which first initializes a tadpole into a state with either $B_{\bf p} = 1$ or $0$, and then pulls this tadpole through a line using $F$-moves to produce a new tadpole which can be measured to verify that the value of $B_{\bf p}$ has not changed. As for the pentagon circuit, this four-qubit circuit can be simplified to a two-qubit circuit which, in this case, can be used to calibrate the $S$ operation.
The sequence of operations we consider is illustrated in Fig.~\ref{sf_circuit}(a). The system consists of a four-edged trivalent lattice and so uses four qubits, labeled 1 through 4 in the figure. Initially two qubits (1 and 2) are assigned to edges which form a line and the other two qubits (3 and 4) form a tadpole attached to this line. As always, in what follows we assume that it has been verified that $Q_{\bf v} = 1$ on each of the two vertices of this lattice at the start of the process.
The first step is to initialize the tadpole in a state with either $B_{\bf p} = 1$ or $0$. Then, using two $F$-moves, as shown in Fig.~\ref{sf_circuit}(a), the tadpole can be pulled through the line. The $F$-moves preserve $B_{\bf p}$, and so the intermediate state of this process is a 2-sided plaquette which has been initialized either into the code space if $B_{\bf p} = 1$ or outside of the code space if $B_{\bf p} = 0$. After the tadpole has been pulled through the line, the two qubits forming the initial tadpole have swapped places --- the head of the tadpole is now the tail and vice versa. If $B_{\bf p}$ is now measured for the new tadpole the result should yield the same value of $B_{\bf p}$ that the tadpole was initialized to at the start of the process.
The left-hand side of the circuit identity shown in Figure \ref{sf_circuit}(b) is a four-qubit circuit which carries out the procedure described above. If qubit 4 is initially in the state $|1-a\rangle$ then the first $S$ circuit initializes the tadpole in a state with $B_{\bf p} = a$. A reduced $F$ circuit then carries out the first $F$-move and produces a 2-sided plaquette with $B_{\bf p} = a$. Next, a second reduced $F$ circuit carries out the second $F$-move producing a new tadpole with $B_p = a$ but with the head and tail of the tadpole interchanged. Finally, after carrying out an $S$ circuit on this tadpole the state of qubit 3 will be $|1-a\rangle$.
Note that if qubit 4 is initially in the state $|1\rangle$ so that the tadpole is initialized to a state with $B_p = 0$ then qubit 3 can initially be in either the state $|0\rangle$ or $|1\rangle$ while still satisfying the vertex constraint. After the first $S$ circuit on the left-hand side of Fig.~\ref{sf_circuit}(b) is carried out the tadpole will then be placed in a quantum superposition of $|\psi_{B_{\bf p} = 0},a\rangle$ and $|\psi_{B_{\bf p}=0},b\rangle$ (See Eqns.~(\ref{bp2}) and (\ref{bp3})). At the end of the circuit, after being pulled through the line formed by qubits 1 and 2, the tadpole will still be in the two-dimensional $B_{\bf p} = 0$ Hilbert space, but the particular superposition will in general have changed. Direct calculation shows that if qubits 1 and 2 are both in the state $|1\rangle$ then the operation acting on the two-dimensional $B_p =0$ space when pulling the tadpole through the line is given by
\begin{eqnarray}
U = \left(\begin{array}{cc} -\phi^{-2} & \sqrt{1-\phi^{-4}} \\
\sqrt{1-\phi^{-4}} & \phi^{-2}\end{array}\right).
\label{umatrix}
\end{eqnarray}
Otherwise, if either qubit 1 or qubit 2 (or both) are in the state $|0\rangle$ the state of the final tadpole will be the same as that of the initial tadpole with the head and tail qubits swapped. These cases are all accounted for by the SWAP gate and four-qubit controlled-$U$ operation on the right-hand side of the circuit identity in Fig.~\ref{sf_circuit}(b). As for the pentagon circuit, this identity only holds when $Q_{\bf v} = 1$ on the two vertices of the initial lattice (again these vertices are labeled inside the red box in the figure).
\begin{figure}[t]
\begin{center}
\includegraphics[scale=.35]{sf_simple.pdf}
\end{center}
\caption{(Color online) (a) Simplified two-qubit circuit identity obtained by setting qubits 1 and 2 to the state $|1\rangle$ in the circuit identity shown in Fig.~\ref{sf_circuit}(b). (b) Equivalent circuit identity obtained by moving the two NOT gates from the left side of the identity shown in (a) to the right side.} \label{sf_simple}
\end{figure}
This four-qubit circuit, which essentially represents initializing a 2-sided plaquette into a state with a given value of $B_{\bf p}$ and then producing a state which can be measured to determine $B_{\bf p}$ after carrying out a different $F$-move than the one used to initialize it, is much simpler than the full circuit for measuring $B_{\bf p}$ for a hexagonal plaquette. However, it still involves the four-qubit Toffoli gate which appears in the reduced $F$ circuit. As for the pentagon circuit, a simpler two-qubit circuit identity can be found by fixing the states of the qubits which act effectively as control qubits (qubits 1 and 2 in Fig.~\ref{sf_circuit}). If we fix these qubits to both be in the state $|1\rangle$ we obtain the simplified two-qubit circuit identity shown in Fig.~\ref{sf_simple}(a).
This circuit identity can be simplified further by multiplying both sides on the left and right by NOT gates which act on the top and bottom qubits, respectively, to obtain the equivalent circuit identity shown in Fig.~\ref{sf_simple}(b). Note that if the initial state for the circuit shown in Fig.~\ref{sf_simple}(a) is $|1\rangle|0\rangle$, where the first qubit is the top qubit (qubit 3 in Fig.~\ref{sf_circuit}), then the vertex constraint is not satisfied for the full four-qubit circuit. However, the simplified circuit identity is readily seen to be satisfied in this case. For all other cases the vertex constraint is satisfied, and so it follows that the expression shown in Fig.~\ref{sf_simple}(a) and the equivalent expression in Fig.~\ref{sf_simple}(b) are exact circuit identities, independent of whether or not the vertex constraint is satisfied.
The key action of the two-qubit circuit on the left-hand side of Fig.~\ref{sf_simple}(b) occurs when the tadpole is initialized in a state with $B_{\bf p} = 1$ for which the tail qubit must start in the state $|0\rangle$. For this case, after pulling the tadpole through the line the new tadpole must again be in the state with $B_{\bf p} = 1$. Thus, after accounting for the removal of the two NOT gates, this circuit must take the state $|1\rangle |0\rangle$ to the state $|0\rangle |1\rangle$.
Like the five controlled-$F$ SWAP circuit in Fig.~\ref{pentagon_simple}, which can be used to calibrate the $F$ matrix, the circuit identity shown in Fig.~\ref{sf_simple}(b) can be used to calibrate the $S$ matrix. Once $F$ has been fixed by the pentagon circuit, the requirement that the circuit on the left-hand side of Fig.~\ref{sf_simple}(b) takes the state $|1\rangle|0\rangle$ to the state $|0\rangle |1\rangle$ fixes the form of the matrix $S$ (up to an overall phase which is irrelevant for our purposes). Note that in performing this calibration it is not necessary to carry out a full quantum process tomography. It is sufficient to verify that the circuit identity holds for the initial state $|1\rangle|0\rangle$. For this case, only the SWAP gate on the right-hand side is relevant since the controlled-$XUX$ gate enters only when the initial state of the second qubit is $|1\rangle$.
\section{Conclusions}
In this paper we have constructed explicit quantum circuits for measuring the vertex and plaquette operators, $Q_{\bf v}$ and $B_{\bf p}$, in the Fibonacci Levin-Wen model. These operators can be viewed as stabilizers for the Fibonacci code,\cite{koenig10} a surface code for which defects can behave as Fibonacci anyons --- the simplest non-Abelian anyons for which braiding alone is universal for quantum computation. While the $Q_{\bf v}$ measurement is not significantly more difficult than the analogous measurement for the Kitaev surface codes (for which the defects behave as Abelian anyons), the $B_{\bf p}$ measurement scheme we present here {\it is} significantly more difficult than its Abelian counterpart. While the present scheme is almost certainly not the most efficient one for performing this measurement, given the complexity of the operator $B_{\bf p}$ it is likely that even the most efficient schemes will require a large number of primitive gate operations. This cost in circuit complexity will then need to be weighed against the gain of not requiring magic state distillation. The situation is somewhat analogous to comparing the relative merits of performing topological quantum computation with Ising anyons (which requires magic state distillation) to Fibonacci anyons.\cite{baraban10}
It is clear that further work will be needed before such a direct comparison of the resources needed to carry out fault-tolerant quantum computation using the Fibonacci code with that using the Kitaev surface code will be possible. While recent progress strongly suggests that the Kitaev surface code is the most promising from the practical point of view of trying to build an actual fault-tolerant quantum computer, we believe it is too early to rule out the possibility that the Fibonacci code may have some practical implications. Even if it does not, we believe the Fibonacci code is of intrinsic interest, in part because computing with it can be viewed as essentially simulating a non-Abelian state of matter on a quantum computer.
Our measurement circuit for $B_{\bf p}$ is built out of circuits which realize $F$-moves and the action of the $S$ matrix on a trivalent lattice. In addition to our measurement circuits we have also given simpler circuits built out of these $F$ and $S$ circuits. The first is a seven-qubit circuit which can be used to verify that the $F$ circuit satisfies the pentagon equation, as well as a simpler two-qubit circuit which contains the nontrivial content of this equation and fixes the form of the $F$ matrix. The second is a four-qubit circuit which uses the $S$ circuit to initialize a tadpole (1-sided plaquette) in a state with either $B_{\bf p} = 1$ or $0$, carries out a sequence of $F$-moves to pull the tadpole through a line, and then produces a state which can be measured to determine $B_{\bf p}$ for the new tadpole. For this circuit we have also given a simpler two-qubit circuit which, once $F$ has been fixed by the pentagon circuit, fixes the form of the $S$ matrix. These simple two-qubit circuits (Figs.~\ref{pentagon_simple} and \ref{sf_simple}) may be useful for calibrating the $F$ and $S$ operations.
A recurring theme in this work has been the need for $n$-qubit Toffoli gates (with $n =3,4$ and 5) when computing with the Fibonacci code. These $n$-qubit Toffoli gates arise as a natural consequence of the non-Abelian nature of this code. We believe the possibility of using non-Abelian surface codes such as the Fibonacci code for fault-tolerant quantum computation provides further motivation for developing experimental techniques to directly carry out accurate $n$-qubit Toffoli-class gates.
\
\acknowledgments NEB acknowledges support from US DOE Grant No. DE-FG02-97ER45639 and DDV is grateful for support from the Alexander von Humboldt foundation.
|
{
"timestamp": "2012-10-17T02:13:42",
"yymm": "1206",
"arxiv_id": "1206.6048",
"language": "en",
"url": "https://arxiv.org/abs/1206.6048"
}
|
\section{Introduction}
\label{intro}
The envelope theory \cite{hall89,hall01,hall01b,hall02,hall03,hall05} is a powerful method to obtain approximate solutions, eigenvalues and eigenstates, of eigenequations in quantum mechanics. It has been rediscovered and developed recently under the name of the auxiliary field method \cite{silv08,silv09,eigen,silv11,sema11a,sema12}. Both techniques are equivalent \cite{buis09}, but they were introduced from completely different starting points. Let us assume that the Hamiltonian $H$ can be written as (in the following, we will work in natural units $\hbar = c = 1$)
\begin{equation}
\label{TpV}
H = T(p) + V( r),
\end{equation}
with $p=|\bm p|$ and $r=|\bm r|$. Such a form is relevant for one-body and two-body systems. The basic idea is to replace this Hamiltonian $H$ by another one $\widetilde H$ which is solvable, the eigenvalues of $\widetilde H$ being optimized to be as close as possible to those of $H$. Following the structure of the Hamiltonian, the approximate eigenvalues \emph{i)} can be upper or lower bounds, or have no variational character; \emph{ii)} can be obtained as a closed-form expression, or only numerically computed. In the most favorable case, an analytical bound, the dependence of the eigenvalues on parameters of the Hamiltonian and the quantum numbers can be determined, giving deep insights about the structure of the solutions and a reliable estimation of the spectrum. Even in the less favorable situation, a non-variational numerical approximation, it is possible to check easily and rapidly more elaborate numerical computations.
At the origin, the method has been developed for Schr\"odinger equations \cite{hall89,silv08}, and afterwards
it has been extended for the semirelativistic kinematics \cite{hall01,silv09}. The purpose of this work is to generalize the technique to arbitrary kinetic operators. This is motivated by the existence of non-standard kinetic energies in some physical problems, for instance in atomic physics (non-parabolic dispersion relation) \cite{arie92} or in hadronic physics (particle mass depending on the relative momentum) \cite{szcz96}. Another motivation is to support the new definition of the effective particle mass proposed in Refs.~\cite{arie92,arie12,arie12b,arie12c}.
The generic method to compute approximate solutions (which could be upper or lower bounds) of a Hamiltonian with a non-standard kinetic part is presented in Section~\ref{ge}. As this work is a generalization of the one described in Ref.~\cite{sema12}, the main equations are worked out without too many details. In Section~\ref{sc}, a semiclassical interpretation of the generic equations is given. In order to check the validity of the method, an analytical toy model with a Gaussian form for the kinetic part is solved and the formula obtained is compared with numerical solutions in Section~\ref{tm}. Concluding remarks are given in Section~\ref{conclu}.
\section{General equation}
\label{ge}
The envelope theory is generalized here to treat on the same footing the potential and kinetic parts. The idea is to replace the Hamiltonian~(\ref{TpV}) by the following one
\begin{equation}
\label{Htilde}
\widetilde H = \widetilde T + \widetilde V,
\end{equation}
with
\begin{equation}
\label{Ttilde}
\widetilde T = \frac{\bm p^2}{2\, \nu} + T(J(\nu)) - \frac{J(\nu)^2}{2\, \nu},
\end{equation}
and
\begin{equation}
\label{Vtilde}
\widetilde V = \rho\,P(r) + V(I(\rho)) -\rho\,P(I(\rho)).
\end{equation}
$\nu$ and $\rho$ are two real parameters, and we assume that the following functions are well-defined
\begin{eqnarray}
\label{Htilde2}
&&\quad I(x)=K^{-1}(x), \quad K(x)=\frac{V'(x)}{P'(x)}, \nonumber \\
&&\quad J(x)=L^{-1}(x), \quad L(x)=\frac{x}{T'(x)}.
\end{eqnarray}
The kinematics of $\widetilde H$ is nonrelativistic, $P(x)$ is an auxiliary potential, and a prime denotes the derivative. An eigenvalue of this Hamiltonian is given by
\begin{equation}
\label{EAFM}
E(\nu,\rho) = T(J(\nu)) - \frac{J(\nu)^2}{2\, \nu}
+V(I(\rho)) - \rho\,P(I(\rho)) + \epsilon(\nu,\rho),
\end{equation}
where $\epsilon(\nu,\rho)$ is an eigenvalue of the nonrelativistic Hamiltonian
\begin{equation}
\label{HNR}
H_{\textrm{\scriptsize{NR}}} = \frac{\bm p^2}{2 \nu}+ \rho\,P(r).
\end{equation}
Kinetic and potential parts are treated in a similar way, but with $x^2$ which is the counterpart of $P(x)$ and $1/(2\, \nu)$ which is the counterpart of $\rho$. The approximation for an eigenvalue of the genuine Hamiltonian is given by $E(\nu_0,\rho_0)$ for which
\begin{equation}
\label{condextrema}
\left. \frac{\partial E(\nu,\rho)}{\partial \nu}\right|_{\nu_0,\rho_0} = \left. \frac{\partial E(\nu,\rho)}{\partial \rho}\right|_{\nu_0,\rho_0} = 0.
\end{equation}
Within these conditions, $\widetilde T$ is tangent to $T$ and $\widetilde V$ is tangent to $V$. The comparison theorem \cite{reed78,sema11b} implies that, if $\widetilde T \ge T$ and $\widetilde V \ge V$ for all values of the arguments, then the eigenvalues of $\widetilde H$ are upper bounds of the corresponding eigenvalues of $H$. Reciprocally, if $\widetilde T \le T$ and $\widetilde V \le V$ for all values of the arguments, then the eigenvalues of $\widetilde H$ are lower bounds. In other cases, no guarantee exists about the variational character of the approximations. A simple criterion to determine if a bound exists is given in Refs.~\cite{hall01,hall01b,hall02} for the potential, but it can also be used for the kinetic part. Let us define two functions $h$ and $g$ such that
\begin{equation}
\label{hg}
T(x) = h(x^2) \quad \textrm{and}\quad V(x) = g(P(x)).
\end{equation}
If $h(x)$ and $g(x)$ are both concave (convex) functions, $E(\nu_0,\rho_0)$ is an upper (lower) bound of the genuine eigenvalue. If $T(p) \propto p^2$ ($V(r) \propto P(r)$), the variational character is solely ruled by the convexity of $g(x)$ ($h(x)$).
Interesting results can be obtained if the auxiliary potential is a power law,
\begin{equation}
\label{Pr}
P(r) = \textrm{sgn}(\lambda)\, r^\lambda \quad \textrm{with} \quad 0 \ne \lambda > -2.
\end{equation}
Within this condition, $\rho$ and $\nu$ are always positive quantities, and $\epsilon(\nu,\rho)$ can be written under the form \cite{silv08,hall89}
\begin{equation}
\label{epsPr}
\epsilon(\nu,\rho) = \frac{\lambda+2}{2 \lambda}\left( |\lambda|\rho \right)^{2/(\lambda+2)}\left(\frac{Q^2}{\nu}\right)^{\lambda/(\lambda+2)},
\end{equation}
where $Q$ is a global quantum number. The method is particularly interesting if $Q$ is exactly known. This is the case for the Coulomb interaction ($\lambda=-1$, $Q=n+l+1$) and the harmonic potential ($\lambda=2$, $Q=2\, n+l+3/2$). If $\lambda=1$, $Q$ is known only for $l=0$ states and is equal to $2 (-\alpha_n/3)^{2/3}$, where $\alpha_n$ is the $(n + 1)$th zero of the Airy function Ai. For arbitrary values of $\lambda$, simple and good analytical approximations can be found in Ref.~\cite{silv08}. But, if $Q$ is not computed with a sufficient accuracy, the variational character of a bound cannot be guaranteed.
After some algebra, constraints (\ref{condextrema}), with $P(r)$ given by (\ref{Pr}), reduce to
\begin{eqnarray}
\label{cond1}
(|\lambda|\, Q^\lambda)^\frac{2}{\lambda+2} (\rho_0\,\nu_0)^\frac{2}{\lambda+2} &=& p_0^2, \\
\label{cond2}
(|\lambda|\, Q^\lambda)^\frac{2}{\lambda+2} (\rho_0\,\nu_0)^{-\frac{\lambda}{\lambda+2}} &=& |\lambda|\, r_0^\lambda,
\end{eqnarray}
where $r_0=I(\rho_0)$ and $p_0=J(\nu_0)$. From (\ref{cond1}) and (\ref{cond2}), we can deduce that $r_0\, p_0 = Q$. Taking into account this result, plus the relations $\rho_0=K(r_0)=V'(r_0)/(|\lambda|\,r_0^{\lambda-1})$ and $\nu_0=L(p_0)=p_0/T'(p_0)$, (\ref{cond1}) and (\ref{cond2}) can be written in the more compact form (\ref{AFM3}).
After some algebra, the approximate eigenvalue $E(\nu_0,\rho_0)$ given by (\ref{EAFM}) with the parameterization (\ref{epsPr}) can be greatly simplified into (\ref{AFM1}). Finally, the approximate solution is given by the following set of equations
\begin{eqnarray}
\label{AFM1}
&&E = T(p_0)+V(r_0), \\
\label{AFM2}
&&p_0 = \frac{Q}{r_0}, \\
\label{AFM3}
&&p_0\, T'(p_0) = r_0\, V'(r_0).
\end{eqnarray}
The parameter $r_0$ can then be interpreted as a mean distance between the particles and $p_0$ as a mean momentum per particle. Both parameters depend on the quantum numbers via $Q$. The value $E$ can be a (upper or lower) bound following the convexity of functions $h(x)$ and $g(x)$, as explained above. Let us note that the only trace of the auxiliary potential is contained in the value of $Q$, and that (\ref{AFM3}) is the translation into the variables $r_0$ and $p_0$ of the generalized virial theorem \cite{virial}. The system~(\ref{AFM1})-(\ref{AFM3}) is similar to the systems (3.2)-(3.4) in Ref.~\cite{sema11a} and (15)-(17) in Ref.~\cite{sema12}, but here the form of the kinetic part is arbitrary.
Let us note $|\nu_0,\rho_0\rangle$ an eigenstate of the nonrelativistic Hamiltonian $H_{\textrm{\scriptsize{NR}}}$ given by (\ref{HNR}) with $\nu=\nu_0=p_0/T'(p_0)$ and $\rho=\rho_0=V'(r_0)/(|\lambda|\,r_0^{\lambda-1})$. Such a state is an approximation of the corresponding eigenstate of $H$ \cite{eigen}. Using the Hellmann-Feynman theorem \cite{Hell35} as in Ref.~\cite{silv08}, it can be shown that
\begin{eqnarray}
\label{pmean}
\langle \nu_0,\rho_0 | \bm p^2 | \nu_0,\rho_0\rangle &=& p_0^2,\\
\label{rmean}
\langle \nu_0,\rho_0 | r^\lambda |\nu_0,\rho_0\rangle &=& r_0^\lambda.
\end{eqnarray}
This confirms the interpretation of parameters $r_0$ and $p_0$ as mean values.
\section{Semiclassical interpretation}
\label{sc}
\begin{figure}[ht]
\begin{center}
\includegraphics*[width=6cm]{classeff.eps}
\caption{Classical circular motion of two particles in their center of mass frame. \label{fig:class}}
\end{center}
\end{figure}
In Refs.~\cite{arie92,arie12,arie12b,arie12c}, an effective particle mass $m^\textrm{eff}$ is defined by the relation $\bm p = m^\textrm{eff}\, \bm v$, where $\bm v$ is the group velocity of the associated wave packet. It follows then that
\begin{equation}
\label{meffdef}
m^\textrm{eff}=p\,\left( \frac{d T}{d p} \right)^{-1}.
\end{equation}
Using this definition, a semiclassical interpretation of the system (\ref{AFM1})-(\ref{AFM3}) is possible. If $T_i(p_i)$ is the kinetic energy for the $i$th particle, the kinetic energy for two particles can be written $T_1(p_0)+T_2(p_0)=T(p_0)$, where $p_0$ is the module of the common momentum in the center of mass frame. With the effective mass (\ref{meffdef}), we have
\begin{equation}
\label{meff}
m_i^\textrm{eff}=\frac{p_0}{T_i'(p_0)},
\end{equation}
and the speed of the $i$th particle is given by $v_i=T_i'(p_0)$. As in Ref.~\cite{sema12}, let us assume a classical circular motion for the two particles (see Fig.~\ref{fig:class}). The centripetal force $F_i$ acting on the $i$th particle is given by
\begin{equation}
\label{Fi2}
F_i = m_i^\textrm{eff} \frac{v_i^2}{r_i} = p_0 \frac{T_i'(p_0)}{r_i}.
\end{equation}
If $r_0$ is the distance between the two particles, the rigid rotation constraints, $r_0=r_1+r_2$ and $v_1/r_1 = v_2/r_2$, imply that
\begin{equation}
\label{r0r1r2}
r_0 = r_i \frac{T'(p_0)}{T_i'(p_0)}.
\end{equation}
If the force acting on the $i$th particle comes from the potential $V(r)$ generated by $j$, then $F_1=F_2=V'(r_0)$. Equations~(\ref{Fi2}) and (\ref{r0r1r2}) can be recast into the form (\ref{AFM3}), and it is obvious that (\ref{AFM1}) gives the mass of the system. A semiclassical quantization of the total orbital angular momentum gives $r_0\, p_0 = l+1/2$, and we obtain a system very similar to (\ref{AFM1})-(\ref{AFM3}). This supports (\ref{meffdef}) as a good definition for the effective mass.
\section{A toy model}
\label{tm}
In this section, we solve a simple toy model in order to check the relevance of the method. Let us consider the following Hamiltonian with a Gaussian form for the kinetic part
\begin{equation}
\label{TpVexp}
H = \sigma\, m\, \exp\left( \frac{\bm p^2}{2\, m^2}\right) + a\, r^2,
\end{equation}
where the parameter $m$ plays the role of a mass. Indeed, for high values of $m$ ($\gg a^{1/3}$), $H$ reduces to a harmonic oscillator Hamiltonian
\begin{equation}
\label{TpVexplim}
H \to \sigma\, m + \frac{\sigma\, \bm p^2}{2\, m} + a\, r^2 + \textrm{O}\left( \frac{\bm p^4}{m^2}\right).
\end{equation}
The parameter $\sigma=1$ or 2 is the number of particles (an arbitrary positive value of $\sigma$ can also be considered to study duality relations between different many-body systems \cite{silv11}).
Such a Hamiltonian is not realistic but it has been chosen because \emph{i)} it admits an analytical lower bound (see below); \emph{ii)} it reduces to the well known case (\ref{TpVexplim}) in a well-defined limit; \emph{iii)} accurate numerical solutions are not easy to obtain (see below). It is more convenient to work with the conjugate dimensionless variables $\bm x = \sqrt{a/(\sigma\, m)}\,\bm r$ and $\bm q = \sqrt{\sigma\, m/a}\,\bm p$ for the dimensionless Hamiltonian $H_{\textrm{d}}=H/(\sigma\, m)$ given by
\begin{equation}
\label{TpVexpred}
H_{\textrm{d}} = \exp\left( k\,\bm q^2 \right) + x^2 \quad \textrm{with} \quad k=\frac{a}{2\,\sigma\, m^3}.
\end{equation}
The corresponding eigenvalues are noted $\epsilon = E/(\sigma\, m)$.
\begin{figure}[ht]
\begin{center}
\includegraphics*[width=0.45\textwidth]{expp2.eps}
\caption{Eigenvalues $\epsilon$ of (\ref{TpVexpred}) as a function of $k$. In order of increasing energy, the eigenvalues correspond to $(n,l)=(0,0)$, $(0,1)$ and $(1,0)$. Dots: accurate numerical solutions; Solid black lines: lower bound (\ref{Eexpred}) with $Q=2n+l+3/2$; Dashed grey lines: harmonic oscillator approximation (\ref{Eexpredho}) with $Q=2n+l+3/2$. \label{fig:expp2}}
\end{center}
\end{figure}
Using the set of equations (\ref{AFM1})-(\ref{AFM3}), the following approximate solution can be found
\begin{equation}
\label{Eexpred}
\epsilon_\textrm{app} = \exp\left( 2\, W_0\left( \sqrt{k}\, \frac{Q}{2} \right) \right)\left[ 1+ 2\, W_0\left( \sqrt{k}\, \frac{Q}{2} \right) \right],
\end{equation}
where $W_0(z)$ is the main branch of the Lambert function (also called Omega function or product logarithm) \cite{corl96}. If $k \ll 1$, we can write $\epsilon_\textrm{app} = \epsilon_\textrm{HO} + \textrm{O}\left( k \right)$ with
\begin{equation}
\label{Eexpredho}
\epsilon_\textrm{HO} = 1+2\,\sqrt{k}\, Q.
\end{equation}
This corresponds to the harmonic oscillator approximation. A natural choice is to take $P(x)=V(x)=x^2$. Then, $Q=2n+l+3/2$ and the function $h(x)$ defined by (\ref{hg}) is convex. So, (\ref{Eexpred}) is a lower bound, whose quality is shown in Fig.~\ref{fig:expp2}. The numerical solutions of (\ref{TpVexpred}) have been computed with the three-dimensional Fourier grid Hamiltonian method \cite{mars89,bali91,brau98} which is particularly well suited for this type of Hamiltonian. This numerical procedure is equivalent to an expansion in a special basis \cite{sema00} and implies the computation of Hamiltonian matrix elements. Because of the exponential function in (\ref{TpVexpred}), these matrix elements can be huge numbers and they must be computed with a very high accuracy to obtain stable and accurate eigenvalues. One can see that the lower bound is quite good and more accurate than the harmonic oscillator approximation when $k$ increases.
\section{Concluding remarks}
\label{conclu}
The envelope theory (or equivalently the auxiliary field method) is a method to compute approximate solutions (generally upper or lower bounds) of Hamiltonians of the form $H=T(p) + V( r)$, where the kinetic part $T$ is a nonrelativistic \cite{hall89,silv08} or a semirelativistic one \cite{hall01b,sema12}. In this paper, it is shown that the method can be used for arbitrary forms of $T$. The idea is to replace the Hamiltonian $H$ by another one $\widetilde H = \widetilde T + \widetilde V$ which is solvable, and with $\widetilde T$ and $\widetilde V$ respectively tangent to $T$ and $V$. Provided $\widetilde T$ is a nonrelativistic kinetic operator and $\widetilde V$ a power-law potential, the approximate eigensolutions can be easily computed by solving a set of three equations which have a natural semiclassical interpretation. Nevertheless, the computation is a full quantum one since eigenvalues and eigenstates are obtained for a well-defined global quantum number $Q$.
A priori, this method can be applied to a wide variety of Hamiltonians relevant for various domains, from atomic to hadronic physics. With a good choice of the power law, the value of $Q$ is analytically known and the eigenvalues obtained can be upper or lower bounds of the genuine energies. While numerical approximations can be easily computed, closed-form formulae can even be obtained for some particular Hamiltonians. The toy model studied here with a harmonic potential and a Gaussian kinetic operator is in the most favorable situation: A quite accurate analytical lower bound is obtained giving deep insights about the structure of the solutions.
\begin{acknowledgments}
C.S. thanks the F.R.S.-FNRS for financial support.
\end{acknowledgments}
|
{
"timestamp": "2012-07-31T02:02:09",
"yymm": "1206",
"arxiv_id": "1206.5960",
"language": "en",
"url": "https://arxiv.org/abs/1206.5960"
}
|
\section{#1}\setcounter{equation}{0}}
% Number equations per section, e.g. (2.13).
\renewcommand{\theequation}{\thesection.\arabic{equation}}
% --- Young-tableau drawing machinery (plain-TeX boxes) ---
% \tableauside: side length of one tableau box; \tableaurule: rule thickness;
% \tableaustep: inner step (side minus rule), set at use time by \tableau.
\newdimen\tableauside\tableauside=1.0ex
\newdimen\tableaurule\tableaurule=0.4pt
\newdimen\tableaustep
% Zero-depth horizontal/vertical rules of a given length, used so that the
% boxes of a row overlap without accumulating height or width.
\def\phantomhrule#1{\hbox{\vbox to0pt{\hrule height\tableaurule width#1\vss}}}
\def\phantomvrule#1{\vbox{\hbox to0pt{\vrule width\tableaurule height#1\hss}}}
% One square cell: top rule, two side rules separated by \tableaustep, bottom rule.
\def\sqr{\vbox{%
\phantomhrule\tableaustep
\hbox{\phantomvrule\tableaustep\kern\tableaustep\phantomvrule\tableaustep}%
\hbox{\vbox{\phantomhrule\tableauside}\kern-\tableaurule}}}
% \squares{n}: a row of n cells, built with the \loop...\repeat primitive.
\def\squares#1{\hbox{\count0=#1\noindent\loop\sqr
\advance\count0 by-1 \ifnum\count0>0\repeat}}
% \tableau{n1 n2 ... }: stacked rows of n1, n2, ... cells (a Young diagram).
% The argument is a space-separated list of row lengths; \tableau appends a
% terminating "0" and hands the list to the recursive scanner \gettableau.
\def\tableau#1{\vcenter{\offinterlineskip
\tableaustep=\tableauside\advance\tableaustep by-\tableaurule
\kern\normallineskip\hbox
{\kern\normallineskip\vbox
{\gettableau#1 0 }%
\kern\normallineskip\kern\tableaurule}%
\kern\normallineskip\kern\tableaurule}}
% Recursively consume one row length at a time; the sentinel 0 stops recursion.
\def\gettableau#1{\ifnum#1=0\let\next=\null\else
\squares{#1}\let\next=\gettableau\fi\next}
\tableauside=1.0ex
\tableaurule=0.4pt
% --- Inline vacuum-diagram pictures (LaTeX picture environment, units = pt) ---
% Each macro draws a small vacuum Feynman graph; the name records the number of
% vertices. \raisebox{0pt} keeps the picture on the text baseline.
% Two-vertex graph: one loop with a single internal propagator.
\newcommand{\twoVgraph}{\raisebox{0pt}{
\begin{picture}(18,18)(-9,-5)
\put(0,0){\circle{16}} \put(-8,0){\line(1,0){16}}
\end{picture}}}
% Four-vertex graph: oval with two parallel internal propagators.
\newcommand{\fourVgraph}{\raisebox{0pt}{
\begin{picture}(18,26)(-9,-9)
\put(0,0){\oval(16,24)} \put(-8,4){\line(1,0){16}}
\put(-8,-4){\line(1,0){16}}
\end{picture}}}
% Six-vertex graph: three small loops joined in a triangle.
\newcommand{\sixVgraph}{\raisebox{0pt}{
\begin{picture}(26,24)(-13,-8)
\put(-9,8){\circle{6}} \put(9,8){\circle{6}}
\put(-6,8){\line(1,0){12}} \put(0,-8){\circle{6}}
\put(-9,5){\line(2,-3){7}} \put(9,5){\line(-2,-3){7}}
\end{picture}}}
% Eight-vertex graph, topology I: four loops at the corners of a square.
\newcommand{\eightVgraphI}{\raisebox{0pt}{
\begin{picture}(26,26)(-13,-9)
\put(-9,9){\circle{6}} \put(9,9){\circle{6}}
\put(-9,-9){\circle{6}} \put(9,-9){\circle{6}}
\put(-6,9){\line(1,0){12}}
\put(-9,6){\line(0,-1){12}}
\put(-6,-9){\line(1,0){12}}
\put(9,6){\line(0,-1){12}}
\end{picture}}}
% Eight-vertex graph, topology II: outer square joined to an inner square by
% four diagonal propagators.
\newcommand{\eightVgraphII}{\raisebox{0pt}{
\begin{picture}(28,28)(-14,-10)
\put(-13,13){\line(1,0){26}}
\put(-13,-13){\line(1,0){26}}
\put(-13,-13){\line(0,1){26}}
\put(13,-13){\line(0,1){26}}
\put(-3,3){\line(1,0){6}}
\put(-3,-3){\line(1,0){6}}
\put(-3,-3){\line(0,1){6}}
\put(3,-3){\line(0,1){6}}
\put(-13,13){\line(1,-1){10}}
\put(-13,-13){\line(1,1){10}}
\put(13,13){\line(-1,-1){10}}
\put(13,-13){\line(-1,1){10}}
\end{picture}}}
% Ten-vertex graph, topology I: eightVgraphI with an extra loop inserted on the
% top propagator.
\newcommand{\tenVgraphI}{\raisebox{0pt}{
\begin{picture}(26,29)(-13,-9)
\put(-9,9){\circle{6}} \put(9,9){\circle{6}}
\put(-9,-9){\circle{6}} \put(9,-9){\circle{6}}
\put(0,9){\circle{6}}
\put(-6,9){\line(1,0){3}}
\put(6,9){\line(-1,0){3}}
\put(-9,6){\line(0,-1){12}}
\put(-6,-9){\line(1,0){12}}
\put(9,6){\line(0,-1){12}}
\end{picture}}}
% Ten-vertex graph, topology II: eightVgraphII with an extra loop inserted on
% the top edge of the outer square.
\newcommand{\tenVgraphII}{\raisebox{0pt}{
\begin{picture}(28,31)(-14,-10)
\put(-13,13){\line(1,0){10}}
\put(0,13){\circle{6}}
\put(13,13){\line(-1,0){10}}
\put(-13,-13){\line(1,0){26}}
\put(-13,-13){\line(0,1){26}}
\put(13,-13){\line(0,1){26}}
\put(-3,3){\line(1,0){6}}
\put(-3,-3){\line(1,0){6}}
\put(-3,-3){\line(0,1){6}}
\put(3,-3){\line(0,1){6}}
\put(-13,13){\line(1,-1){10}}
\put(-13,-13){\line(1,1){10}}
\put(13,13){\line(-1,-1){10}}
\put(13,-13){\line(-1,1){10}}
\end{picture}}}
% \figref{label}: "Fig.~N" with a robust \ref (safe in moving arguments).
\newcommand{\figref}[1]{Fig.~\protect\ref{#1}}
\title{Lectures on non-perturbative effects in large $N$ gauge theories, matrix models and strings}
\author{
Marcos Mari\~no
\\
D\'epartement de Physique Th\'eorique et Section de Math\'ematiques,\\
Universit\'e de Gen\`eve, Gen\`eve, CH-1211 Switzerland\\
\\
\email{marcos.marino@unige.ch}}
\abstract{In these lectures I present a review of non-perturbative instanton effects in quantum theories, with a focus on large $N$ gauge theories and matrix models. I first consider the structure of these effects in the case of ordinary differential equations, which provide a model for more complicated theories, and I introduce in a
pedagogical way some technology from resurgent analysis, like trans-series and the resurgent version of the Stokes phenomenon. After reviewing instanton effects in quantum mechanics and quantum field theory, I address general aspects of large $N$ instantons, and
then present a detailed review of non-perturbative effects in matrix models. Finally, I consider two applications of these techniques in string theory.}
\begin{document}
\sectiono{Introduction}
Many series appearing in Physics and in Mathematics are not convergent. For example, most of the series
obtained by perturbative methods in Quantum Field Theory (QFT) turn out to be asymptotic, rather than convergent. The asymptotic
character of these series is typically an indication that non-perturbative effects should be ``added" in some way to the original perturbative series.
The purpose of these lectures is to provide an introduction to asymptotic series and non-perturbative effects. Since this is already quite a
wide topic, we will restrict our discussion in various ways. First of all, we will consider non-perturbative effects of the
instanton type, i.e. effects due to extra saddle points in the path integral (in particular, we will not consider effects of the renormalon type).
Secondly, we will be particularly interested in the interaction between non-perturbative effects
and large $N$ expansions. Finally, in our discussion we will rely heavily on ``toy models" of non-perturbative effects, and in particular on matrix models. This is not as restrictive as one could think, since matrix models underlie many interesting quantities in string theory and supersymmetric QFTs.
A typical asymptotic series in a coupling constant $g_s$, with instanton corrections, has the following form,
\begin{equation}
\label{simpleseries}
\sum_n a_n g_s^n + {\rm e}^{-A/g_s}\sum_n a_n^{(1)} g_s^n+{\cal O}({\rm e}^{-2A/g_s}).
\end{equation}
Here, the first sum is the original divergent series. The second term is a one-instanton contribution. It has an overall, non-perturbative exponential in $g_s$, multiplying another series (which in general is also asymptotic). The third term indicates higher instanton contributions.
In order to understand this type of quantities, we will develop a three-step approach:
\begin{enumerate}
\item {\it Formal}: we want to be able to compute the terms in the original, perturbative series, as well as the ``non-perturbative quantities" characterizing the instanton
corrections. These include the instanton action $A$, as well as the series multiplying the exponentially small terms. Notice that the resulting object is a series with two
small parameters, $g_s$ and ${\rm e}^{-A/g_s}$, which should be regarded as independent. Such series are called {\it trans-series}. Therefore, the first step in the
understanding of non-perturbative effects is the formal
calculation of trans-series.
\item {\it Classical asymptotics}: the series (\ref{simpleseries}) above is a purely formal expression, since already the first series (the perturbative one) is divergent. One way of making sense of this perturbative series is by regarding it as an asymptotic expansion of
a well-defined function. Finding such a representation, once the original function is given, is the problem addressed by what
I will call ``classical asymptotics." In classical asymptotics, exponential corrections are ill-defined and are never written down explicitly, but they are
lurking in the background: indeed, it might happen that, as we move in the complex plane of the coupling constant, an instanton correction which used to
be exponentially small becomes of order one. This is the basis of the {\it Stokes phenomenon}.
\item {\it Beyond classical asymptotics}: once the classical asymptotics is understood, one can try to go beyond it and use the full information in the formal trans-series
to reconstruct the original function {\it exactly}. A general technique to do this is Borel resummation. The combination of the general theory of
trans-series with Borel resummation gives the {\it theory of resurgence} of Jean \'Ecalle, which is the most powerful method to address this whole circle of questions.
\end{enumerate}
In these lectures we will follow this three-step program in problems with increasing order of complexity, from simple ones, like ordinary differential equations (ODEs), to more difficult ones, like matrix models and string theory. Let us quickly review these different problems in relation to the approach sketched above:
\begin{enumerate}
\item In the theory of {\it ODEs} with irregular singular points, formal solutions are typically divergent series. In this case,
the three steps are well understood.
The first step is almost automatic, since the terms in the trans-series can be
computed recursively, including the instanton corrections. The understanding of the classical asymptotics of these solutions is a venerable subject,
going back to Stokes. The treatment beyond classical asymptotics is
more recent, but has been well established in the work of \'Ecalle, Kruskal, Costin, and others, and we will review some of this work here. A closely related example is the calculation of integrals by the method of steepest descent. Here, the different trans-series are given by the
asymptotic expansion of the integrals around the different critical points. The classical asymptotics of these series is also a well studied subject,
and its resurgent analysis has been also considered by Berry and others.
\item In {\it Quantum Mechanics} (QM) and {\it Quantum Field Theory}, the formal procedure to generate the leading asymptotic series is simply standard perturbation theory. Instanton corrections are obtained by identifying saddle-points of the classical action, and by doing perturbation theory around this instanton background, one obtains exponentially small corrections and
their associated series. The study of classical asymptotics and beyond is more difficult, although many results have been obtained in QM. In realistic QFTs, perturbation theory is so wild that it is not feasible to pursue the program, but in some special QFTs --namely, those without renormalons,
like Chern--Simons (CS) theory in 3d or ${\cal N}=4$ Yang--Mills theory-- there are some partial results. In these lectures I will consider some aspects of the problem for CS theory.
\item An interesting, increasing level of complexity appears when we consider
{\it large $N$ gauge theories}. Here, the computation of formal trans-series becomes more complicated, since
in the $1/N$ expansion we typically have to resum an infinite number of diagrams at each order. For example, in standard QFT, the instanton action is obtained by finding
a classical solution of the equations of motion (EOM) with finite action, while the action of a large $N$ instanton is given by the sum of an infinite number of diagrams.
Another way of understanding this new level of
complexity is simply that in large $N$ theories we have two parameters in the game, $N$ and the coupling constant $g_s$, or
equivalently $g_s$ and the 't Hooft parameter $t=N g_s$. Due to this, the Stokes phenomenon of classical asymptotics becomes more complicated
and leads to {\it large $N$ phase transitions}\footnote{In fact, although it is not widely appreciated, standard phase transitions are examples of the Stokes phenomenon; see \cite{ps} for a nice discussion of this.}. However, in the simple toy case of large $N$ matrix models, the $1/N$ expansion is still very close to the ordinary asymptotic
evaluation of integrals, and we will be able to offer a rather detailed picture of non-perturbative effects.
\item Finally, in {\it string theory} things are even more complicated. Even at the formal level we
cannot go very far: there are rules to compute the genus expansion, but the rules to do (spacetime) instanton calculations are rather {\it ad hoc}. We know that in general instanton corrections involve D-branes or $p$-branes,
and we know how to obtain some qualitative features of their behavior, but a precise framework is still missing due to the lack of a non-perturbative definition.
An alternative avenue is to use large $N$ dualities, relating
string theories to gauge theories, in order to deduce some of these effects from the large $N$ duals. This makes it possible to compute formal trans-series for non-critical strings and some simple string models.
\end{enumerate}
The plan of these lectures is the following: in section \ref{ODEs}, I will review some aspects of asymptotic series and in particular of the series appearing in the context of ODEs. This includes formal trans-series, Borel resummation, the Stokes phenomenon, and the connection between large order behavior and trans-series. I have tried to provide as well some very elementary ideas of the theory of resurgence. In section 3, I review non-perturbative effects in QM and in QFT. The results in QM are well known to the expert; they have been analyzed in detail and made rigorous in for example \cite{delabaerepham}. The study of non-perturbative effects in QFT from the point of view advocated in these lectures is still in its infancy, and I have contented myself with explaining some of the results for CS theory, which are probably not so well known. In section 3 I also introduce some general aspects of instanton effects in large $N$ theories which are probably well known to experts, but difficult to find in the literature, and I illustrate them in the simple case of matrix quantum mechanics. Section 4 is devoted to the study of non-perturbative effects in matrix models, starting from the pioneering works of F. David in \cite{david} and explaining as well more recent results where I have been involved. In section 5, I present two applications of the techniques of section 4 to string theory, by using large $N$ dualities. Finally, in section 6 I make some concluding remarks.
\sectiono{Asymptotics, non-perturbative effects, and differential equations}
\label{ODEs}
\subsection{Asymptotic series and exponentially small corrections}
\label{smallcorr}
A series of the form
\begin{equation}
\label{aseries}
S(w)=\sum_{n=0}^{\infty} a_n w^n
\end{equation}
is asymptotic to the function $f(w)$, in the sense of Poincar\'e, if, for every $N$, the remainder after $N+1$ terms of the series is much smaller than the last
retained term as $w\rightarrow 0$. More precisely,
\begin{equation}
\lim_{w\to 0} w^{-N}\left( f(w)-\sum_{n=0}^{N} a_n w^n\right)=0,
\end{equation}
for all $N>0$. In an asymptotic series, the remainder does not necessarily go to zero as $N \rightarrow \infty$ for a fixed $w$, in contrast to what happens in convergent series.
Analytic functions might have asymptotic expansions. For example, the Stirling series for the
Gamma function
\begin{equation}
\label{stirling}
\Bigl( {z \over 2\pi}\Bigr)^{1/2} \Bigl( {z\over e}\Bigr)^{-z}\Gamma(z) = 1+ {1\over 12 z} +{1\over 288 z^2} +\cdots
\end{equation}
is an asymptotic series for $|z| \rightarrow \infty$. Notice that different functions may have the same asymptotic expansion, since
\begin{equation}
\label{expcorr}
f(w) + C{\rm e}^{-A/w}
\end{equation}
has the same expansion around $w=0$ as $f(w)$, for any $C$, $A$.
In practice, asymptotic expansions are characterized by the fact that, as we vary $N$, the partial sums
\begin{equation}
\label{partialsum}
S_N(w)=\sum_{n=0}^N a_n w^n
\end{equation}
will first approach the true value $f(w)$, and then, for $N$ sufficiently big, they will diverge. A natural question is then to obtain the partial
sum which gives the best possible estimate of $f(w)$. To do this, one has to find the $N$ that truncates the asymptotic expansion
in an optimal way. This procedure is called {\it optimal truncation}. Usually, the way to find the optimal value of $N$ is to retain terms up to the smallest
term in the series, discarding all terms of higher degree. Let us assume (as it is the case in all interesting examples) that the coefficients $a_n$ in (\ref{aseries}) grow factorially at large $n$,
\begin{equation}
\label{aas}
a_n \sim A^{-n} n! \, .
\end{equation}
The smallest term in the series, for a fixed $|w|$, is obtained by minimizing $N$ in
\begin{equation}
\left|a_N w^N \right| =c N! \left|{w \over A}\right|^N.
\end{equation}
By using the Stirling approximation, we rewrite this as
\begin{equation}
c \exp \left\{ N \left( \log \, N -1 -\log \, \left|{A \over w}\right| \right) \right\}.
\end{equation}
The above function has a saddle at large $N$ given by
\begin{equation}
\label{optimalN}
N_*= \left|{A \over w}\right|.
\end{equation}
If $|w|$ is small, the optimal truncation can be performed at large values of $N$, but as $|w|$ increases, fewer and fewer terms of the series can be used.
We can now estimate the error made in the optimal truncation by evaluating the next term in the asymptotics,
\begin{equation}
\label{nonpertamb}
\epsilon(w) =a_{N_*+1} |w|^{N_*+1} \sim {\rm e}^{-|A/w|}.
\end{equation}
Therefore, the maximal ``resolution" we can expect when we reconstruct a function $f(w)$ from an asymptotic expansion is of order $\epsilon(w)$. This ambiguity
is sometimes called a {\it non-perturbative ambiguity}. The reason for this name is that perturbative series are often asymptotic, therefore they do not
determine by themselves the function $f(w)$, and some additional, non-perturbative information is required. Notice that the absolute value of $A$ gives the
``strength" of this ambiguity.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=4cm]{optimal.pdf}\qquad \includegraphics[height=4cm]{optimal2.pdf}
\end{center}
\caption{We illustrate the method of optimal truncation for the quartic integral (\ref{quarticint}) by plotting the difference (\ref{diffexact}) between the integral and the partial sum of order $N$ of its asymptotic expansion, as a function of $N$, for $g=0.02$ (left) and $g=0.05$ (right).}
\label{optimal}
\end{figure}
It is instructive to see optimal truncation at work in a simple example. Let us consider the quartic integral
\begin{equation}
\label{quarticint}
I(g) = {1\over {\sqrt {2\pi}}} \int_{-\infty}^{\infty} {\rm d} z \, {\rm e}^{-z^2/2 -g z^4/4},
\end{equation}
which is well-defined as long as ${\rm Re}(g)>0$. The asymptotic expansion of this integral for small $g$ can be obtained simply by first expanding
the exponential of the quartic perturbation and then integrating the resulting series term by term. One obtains,
\begin{equation}
\label{quarticseries}
I(g) =\sum_{k=0}^{\infty} a_k g^k,
\end{equation}
where
\begin{equation}
a_k = {(-4)^{-k} \over {\sqrt {2\pi}}} \int_{-\infty} ^{\infty} {\rm d} z {z^{4k} \over k!} {\rm e}^{-z^2/2} =(-4)^{-k} {(4k-1)!! \over k!}.
\end{equation}
This series has a zero radius of convergence and provides an asymptotic expansion of the integral $I(g)$. The asymptotic behavior
of the coefficients at large $k$ is obtained immediately from Stirling's formula
\begin{equation}
\label{zkasym}
a_k \sim (-4)^k k!,
\end{equation}
therefore $|A|=1/4$ in this case. In \figref{optimal} we plot the difference
\begin{equation}
\label{diffexact}
\left| I(g) -S_N(g)\right|
\end{equation}
as a function of $N$, for two values of $g$. The optimal values are seen to be $N_*=12$ and $N_*=5$, in agreement with the estimate (\ref{optimalN}).
Perhaps the most important problem in asymptotic analysis is how to go beyond optimal truncation, incorporating in a systematic way the small exponential effects (\ref{expcorr}). In this first section we will address this problem in the context of asymptotic series appearing in ODEs.
\subsection{Formal power series and trans-series in ODEs}
Very often, the solution to a physical or mathematical problem is given by an asymptotic series, and one has a systematic procedure to calculate this series at any desired order (like for example in perturbation theory in QM). The first question we want to ask is the following: can we calculate, at least {\it formally}, non-perturbative effects of exponential type (i.e. like the one in (\ref{expcorr})) which must be added to an asymptotic series?
Since these terms are typically not taken into account in classical asymptotics, in order to include them we have to consider generalizations of asymptotic series.
The resulting objects are called {\it trans-series} and they were first considered in a systematic way by Jean \'Ecalle in his work on ``resurgent analysis" \cite{ecalle}.
An important class of asymptotic series and trans-series are the formal solutions to ODEs with irregular singular points. The simplest example is probably Euler's equation
%
\begin{equation}
\label{eulereq}
{{\rm d} \varphi \over {\rm d} z}+A \varphi(z) ={A\over z}
\end{equation}
%
which has an irregular singular point at $z=\infty$.
There is a formal power-series solution to this equation of the form
%
\begin{equation}
\label{solEuler}
\varphi_0(z)=\sum_{n=0}^{\infty} {a_n \over z^{n+1}}, \qquad a_n=A^{-n} n!.
\end{equation}
%
This solution is an asymptotic series, with zero radius of convergence, since the coefficients grow factorially with $n$.
It is easy to see that one can construct a family of formal solutions to Euler's ODE based on $\varphi_0(z)$,
%
\begin{equation}
\label{eulerts}
\varphi(z) =\varphi_0(z) +C{\rm e}^{-Az}
\end{equation}
%
where $C$ is an arbitrary constant parametrizing the family of solutions. This is our first example of a {\it trans-series}, and it has three important properties:
\begin{enumerate}
\item The term added in (\ref{eulerts}) is non-analytic at $z=\infty$, and it goes beyond the standard solution in the form of an asymptotic series given by (\ref{solEuler}).
It is invisible in the ``perturbative" expansion around $z=\infty$, and it is therefore a ``non-perturbative" correction.
\item The resulting formal expression has {\it two} small parameters, $1/z$ and ${\rm e}^{-A z}$.
\item There is a relation, already pointed out above in the context of optimal truncation, between the ``strength" of
the non-perturbative effect, given by $A$, and the divergence of the asymptotic series.
Namely, $A$ encodes the next-to-leading behavior of $a_n$ at large $n$.
\end{enumerate} These properties are typical of formal trans-series and will reappear in many examples.
A more complicated example of an ODE with a trans-series solution is the well-known {\it Airy equation},
\begin{equation}
\varphi''=x \varphi.
\end{equation}
The solutions to this equation, called Airy functions, are ubiquitous in physics. We are now interested in formal power series solutions to this equation around $x=\infty$.
It is easy to see that one such solution is given by
\begin{equation}
\label{fullai}
Z_{\rm Ai}(x)={1\over 2x^{1/4} {\sqrt{\pi}}} {\rm e}^{-2 x^{3/2}/3}
\sum_{n=0}^{\infty} a_n x^{-3n/2},
\end{equation}
where
\begin{equation}\label{anairy}
a_n = {1\over 2\pi} \Bigl( -{3\over 4} \Bigr)^n {\Gamma(n+{5\over 6}) \Gamma(n+{1\over 6} ) \over n!}.
\end{equation}
Here, the coefficients grow as
\begin{equation}
\label{aigrow}
a_n \sim A^{-n} n!, \qquad A =-{4\over 3}.
\end{equation}
What is the trans-series solution to this equation? As it is well-known, there is another, independent formal power series solution to the Airy equation, given by
\begin{equation}
\label{fullbi}
Z_{\rm Bi}(x)={1\over 2x^{1/4} {\sqrt{\pi}}} {\rm e}^{2 x^{3/2}/3}
\sum_{n=0}^{\infty} (-1)^n a_n x^{-3n/2}.
\end{equation}
The general ``trans-series solution" to the Airy differential equation is just a linear combination of these two formal asymptotic series,
%
\begin{equation}
C_1 Z_{\rm Ai}(x)+C_2 Z_{\rm Bi}(x).
\end{equation}
Notice that $Z_{\rm Ai}(x)$ and $Z_{\rm Bi}(x)$ have different leading exponential behavior. Moreover, the relation between these behaviors is in accord with the third property noted above in the context of Euler's equation: the growth (\ref{aigrow}) of the coefficients suggests that the trans-series added to the asymptotic solution (\ref{fullai}) should have a relative exponential weight of
\begin{equation}
{\rm e}^{4 x^{3/2}/3}
\end{equation}
as compared to (\ref{fullai}), which is indeed the case.
In the case of non-linear ODEs the structure of trans-series solutions is much richer: linear ODEs have trans-series with a finite number of terms, while in nonlinear ODEs they have an infinite number of terms. A class of important examples which are relevant in many physical applications are the
{\it Painlev\'e transcendants}. We will focus on the cases of Painlev\'e I (PI) and Painlev\'e II (PII).
\begin{example} {\it Painlev\'e I}.
The PI equation is
%
\begin{equation}
\label{p1}
u(\kappa)^2-{1\over 6}u''(\kappa)=\kappa.
\end{equation}
This equation appears in many contexts. In particular, it gives the all-genus solution to two-dimensional quantum gravity (see \cite{dfgzj} for a review). There is a formal solution to this equation which goes like $u(\kappa) \sim {\sqrt{\kappa}}$ as $\kappa \rightarrow \infty$:
%
\begin{equation}
\label{asymp1}
u^{(0)}(\kappa)=\kappa^{1/2 } \sum_{n=0}^\infty u_{0,n} \kappa^{-5n/2}=\kappa^{1/2} \biggl( 1-{1\over 48} \kappa^{-{5 \over 2}} -{49 \over 4608} \kappa^{-5} -{1225 \over 55 296} \kappa^{-{15\over 2}}+\cdots\biggr).
\end{equation}
The {\it trans-series} solution to Painlev\'e I is a one-parameter family of solutions to
(\ref{p1}) which includes exponentially suppressed terms as $\kappa\rightarrow \infty$:
\begin{equation}
\label{transseries}
u(\kappa) = \sum_{\ell=0}^{\infty}C^{\ell} u^{(\ell)}(\kappa)=
{\sqrt {\kappa}} \sum_{\ell=0}^{\infty} C^{\ell} \kappa^{-{5 \ell \over 8}} {\rm e}^{-\ell A \kappa^{5/4}} \epsilon^{(\ell)}(\kappa),
\end{equation}
where $C$ is a parameter, the constant $A$ has the value
\begin{equation}
\label{aaction}
A={8 {\sqrt 3}\over 5}
\end{equation}
and
\begin{equation}
\label{el}
\epsilon^{(\ell)}(\kappa)=\sum_{n=0}^{\infty} u_{\ell,n} \kappa^{-5n/4}
\end{equation}
are asymptotic series. Since we have introduced an arbitrary constant $C$ in (\ref{transseries}), we can
normalize the solution such that $u_{1,0}=1$. We will refer to the above series $u^{(\ell)}(\kappa)$ with $\ell \ge 1$ as the $\ell$-th instanton
solution of PI, while the solution $u^{(0)}(\kappa)$ will be referred to as the ``perturbative" solution.
It is easy to see that the $\ell$-th instanton solutions $u^{(\ell)}$ in the trans-series satisfy linear ODEs. For example, one has for $\ell=1$,
\begin{equation}
\left(u^{(1)}\right)''(\kappa)=12 u^{(1)}(\kappa) u^{(0)}(\kappa),
\end{equation}
and from this one finds,
\begin{equation}
\epsilon^{(1)}(\kappa)= 1 - {5 \over 64 {\sqrt {3}}} \kappa^{-{5\over 4}}
+ {75 \over 8192} \kappa^{-{5\over 2}} - {341329 \over 23592960 {\sqrt{3}}} \kappa^{-{15\over 4}} +\cdots.
\end{equation}
Recursion relationships for the coefficients $u_{\ell,n}$ in (\ref{el}) can be found in \cite{gikm}.
\end{example}
\begin{example} \label{exp2} {\it Painlev\'e II}. The Painlev\'e II equation is
\begin{equation}
\label{p2}
u''(\kappa)-2 u^3(\kappa)+ 2\kappa u(\kappa)=0.
\end{equation}
This equation is of fundamental importance in, for example, random matrix theory and non-critical string theory. It appears in the celebrated Tracy--Widom law governing the statistics of the largest eigenvalue in a Gaussian ensemble of random matrices \cite{tw}, in the double-scaling limit of unitary matrix models \cite{ps} and of two-dimensional Yang--Mills theory \cite{gm}, and it also governs the all-genus free energy of two-dimensional supergravity \cite{kms}. As in the case of PI, there is a formal solution to PII which goes like $u(\kappa) \sim {\sqrt{\kappa}}$ as $\kappa \rightarrow \infty$:
\begin{equation}
\label{pertp2}
u^{(0)}(\kappa)= {\sqrt{\kappa}} -
\frac{1}{16\,\kappa^{\frac{5}{2}}} - \frac{73}{512\,\kappa^{\frac{11}{2}}}-
\frac{10657}{8192\,\kappa^{\frac{17}{2}}} - \frac{13912277}{542888\,\kappa^{\frac{23}{2}}}
+\cdots, \qquad \kappa \rightarrow \infty.
\end{equation}
One can consider as well exponentially suppressed corrections to this ``perturbative" behavior and construct a formal trans-series solution with the structure,
\begin{equation}
\label{p2trans}
u(\kappa) =\sum_{\ell=0}^{\infty} C^{\ell} u^{(\ell)}(\kappa)={\sqrt {\kappa}}
\sum_{\ell=0}^{\infty} C^{\ell} \kappa^{-{3 \ell \over 4}} {\rm e}^{-\ell A \kappa^{3/2}} \epsilon^{(\ell)}(\kappa), \quad \kappa \rightarrow \infty,
\end{equation}
where
\begin{equation}
A={4\over 3}
\end{equation}
and
\begin{equation}
\label{eltwo}
\epsilon^{(\ell)}(\kappa)=\sum_{n=0}^{\infty} u_{\ell,n} \kappa^{-3n/2}.
\end{equation}
As before, we normalize the solution with $u_{1,0}=1$.
The perturbative part $u^{(0)}(\kappa)$ is given by (\ref{pertp2}). The instanton expansions can be easily found by plugging the trans-series
ansatz in the Painlev\'e II equation. One finds, for example, for the one-instanton solution,
\begin{equation}
\epsilon^{(1)}(\kappa)=1-{17\over 96} \kappa^{-3/2} + {1513 \over 18432} \kappa^{-3} -\cdots \, .
\end{equation}
\end{example}
After these examples, we can now give some more formal definitions (see \cite{costin}). Let
\begin{equation}
{\boldsymbol \varphi}'={\bf f}(z,{\boldsymbol \varphi}),
\end{equation}
be a rank $n$ system of non-linear differential equations. We assume that ${\bf f}(z,{\boldsymbol \varphi})$ is analytic at $(\infty,{\bf 0})$.
Let $\lambda_i$, $i=1,\cdots, n$, be the eigenvalues of the linearization
\begin{equation}
\hat \Lambda= -\Bigl( {\partial f_i \over \partial \varphi_j} (\infty,{\bf 0})\Bigr)_{i,j=1, \cdots, n}.
\end{equation}
By using various changes of variables, one can always bring the system to the so-called {\it normal} or {\it prepared} form
\begin{equation}
\label{matrices}
{\boldsymbol \varphi}'=-\Lambda {\boldsymbol \varphi} -{1\over z} B {\boldsymbol \varphi}+ {\bf g}(z, {\boldsymbol \varphi}),
\end{equation}
where
\begin{equation}
\Lambda={\rm diag} \, (\lambda_1, \cdots, \lambda_n), \qquad B={\rm diag}(\beta_1, \cdots, \beta_n),
\end{equation}
and by construction ${\bf g}(z, {\boldsymbol \varphi})= {\cal O}(|{\boldsymbol \varphi}|^2, z^{-2}{\boldsymbol \varphi})$. We also choose variables in such a way that $\lambda_1>0$.
\begin{example} {\it Airy equation in prepared form}.
Define
\begin{equation}
\label{xairy}
z=x^{3/2},
\end{equation}
so that the Airy equation reads
\begin{equation}
\varphi''(z)={4\over 9} \varphi(z) -{1\over 3z} \varphi'(z).
\end{equation}
Let us consider the matrix
\begin{equation}
S(z)=\begin{pmatrix} -3/2 & 3/2 \\ 1-{1\over 4 z} & 1 +{1\over 4 z} \end{pmatrix}.
\end{equation}
If we write
\begin{equation}
{\bf u}(z)=S^{-1}(z) {\boldsymbol \varphi} (z), \qquad {\boldsymbol \varphi}(z)=\begin{pmatrix} \varphi(z) \\ \varphi'(z) \end{pmatrix},
\end{equation}
we find
\begin{equation}
{\bf u}'(z) =-\Lambda {\bf u} -{1\over z} B {\bf u} +{\bf g}(z,{\bf u}),
\end{equation}
with
\begin{equation}
\Lambda=\left(
\begin{array}{ll}
2/3 & 0 \\
0 & -2/3
\end{array}
\right), \qquad B=\left(
\begin{array}{ll}
\frac{1}{6 } & 0 \\
0 & \frac{1}{6 }
\end{array}
\right), \qquad {\bf g}(z,{\bf u})={5\over 48 z^2}\begin{pmatrix} 1 & -1 \\ 1& -1 \end{pmatrix} {\bf u}.
\end{equation}
\end{example}
The {\it formal trans-series solution} to (\ref{matrices}) is of the form
\begin{equation}
\label{trans}
{\boldsymbol \varphi}= {\boldsymbol \varphi}_0 + \sum_{ {\bf k} \in {\mathbb N}^n\backslash \{ 0\} } {\bf C}^{\bf k} {\rm e}^{-{\bf k} \cdot {\boldsymbol \lambda} z} z^{-{\bf k} \cdot {\boldsymbol \beta}} {\boldsymbol \varphi}_{\bf k},
\end{equation}
where
\begin{equation}
{\bf C} =(C_1, \cdots, C_n)
\end{equation}
are free parameters and
\begin{equation}
{\bf C}^{\bf k}=C_1^{k_1}\cdots C_n^{k_n}.
\end{equation}
The functions $ {\boldsymbol \varphi}_0$ and ${\boldsymbol \varphi}_{\bf k}$ are formal power series, of the form
\begin{equation}
{\boldsymbol \varphi}_{\bf k}=\sum_{n\ge 0} {\boldsymbol \varphi}_{{\bf k};n} z^{-n}
\end{equation}
We will also denote
\begin{equation}
|{\bf k}|=\sum_i k_i.
\end{equation}
\begin{remark} For linear systems, like the Airy equation, all the trans-series ${\boldsymbol \varphi}_{\bf k}$ vanish when $|{\bf k}|\ge 2$, so the general trans-series solution is of the form
\begin{equation}
{\boldsymbol \varphi} = {\boldsymbol \varphi}_0 + \sum_{i=1}^n C_i {\rm e}^{- \lambda_i z} z^{-\beta_i} {\boldsymbol \varphi}_{i}.
\end{equation}
%
\end{remark}
\subsection{Classical asymptotics and the Stokes phenomenon}
\begin{quotation}
Stokes, by mathematical supersubtlety, transformed Airy's integrals...
\begin{flushright}
Lord Kelvin, ``The scientific work of George Stokes."
\end{flushright}
\end{quotation}
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=6cm]{airycontours.pdf}
\end{center}
\caption{Three paths which lead to solutions of the Airy equation.}
\label{airycontours}
\end{figure}
So far we have worked at a formal level and we have obtained formal solutions, in terms of asymptotic series, to ordinary differential equations.
A natural question is: what is the meaning of these formal power series? In order to answer this question, it is useful to consider in detail the case of the Airy equation and the Airy function, and to use an integral representation (we follow here the discussion in chapter 4 of \cite{miller}). Let us consider the integral
\begin{equation}
\label{zoneg}
I_\gamma={1\over 2\pi {\rm i}} \int_{\gamma} {\rm d} z \, {\rm e}^{S(z)}, \qquad S(z)=x z-{z^3\over 3},
\end{equation}
%
where $\gamma$ is a path which makes the integral convergent. Three such paths are shown in \figref{airycontours}, but not all of them are independent, since
%
\begin{equation}
\gamma_1+\gamma_2 + \gamma_3=0.
\end{equation}
%
It is easy to see that the above integral (\ref{zoneg}) gives a solution to the Airy differential equation. We will now focus on the function defined by the path $\gamma_1$:
\begin{equation}
{\rm Ai}(x)={1\over 2\pi {\rm i}} \int_{\gamma_1} {\rm d} z \, {\rm e}^{x z-{z^3\over 3}},
\end{equation}
which defines the Airy function ${\rm Ai}(x)$. This function is analytic in the complex plane. We set
\begin{equation}
x=r{\rm e}^{{\rm i} \kappa}
\end{equation}
and rescale the integration variable
\begin{equation}
z = u r^{1/2}.
\end{equation}
We find in this way
\begin{equation}
{\rm Ai}(x)={r^{1/2} \over 2\pi {\rm i}} \int_{\gamma_1} {\rm e}^{r^{3/2} ({\rm e}^{{\rm i} \kappa} u -u^3/3)} {\rm d} u.
\end{equation}
We now want to study the behavior of this function for $|x| \gg 1$, by using the saddle-point method. We then focus on the integral
\begin{equation}
\label{lamint}
\int_{\gamma_1} {\rm e}^{\lambda S_{\kappa}(u)} {\rm d} u
\end{equation}
where
\begin{equation}
S_{\kappa}(u) = {\rm e}^{{\rm i} \kappa} u -{u^3\over 3}.
\end{equation}
There are two saddle points
\begin{equation}
u^{\rm R}={\rm e}^{{\rm i} \kappa/2}, \qquad u^{\rm L}=-{\rm e}^{{\rm i} \kappa/2}
\end{equation}
with ``actions"
\begin{equation}
S_{\kappa}(u^{{\rm R},{\rm L}})=\pm {2\over 3} \exp\left( {3 {\rm i} \over 2} \kappa\right).
\end{equation}
It is easy to see that the formal asymptotic power series around the saddle point $u^{\rm L}$ is precisely (\ref{fullai}), while the one around $u^{\rm R}$ is (\ref{fullbi}).
We introduce the notation
\begin{equation}
R_{\kappa}^{ {\rm R}, {\rm L}} ={\rm Re}\left(S_{\kappa}(u^{{\rm R},{\rm L}})\right), \qquad
I_{\kappa}^{ {\rm R}, {\rm L}} ={\rm Im}\left(S_{\kappa}(u^{{\rm R},{\rm L}})\right).
\end{equation}
The paths of steepest descent (ascent) passing through these points are those where the function ${\rm Re}(S_{\kappa}(u))$ decreases (respectively, increases) most rapidly, as we move away from the critical point. We will also denote by $\gamma^{R,L}_\kappa$ the steepest descent paths through $u^{{\rm R},{\rm L}}$, respectively. We show some paths of steepest descent and ascent in \figref{thimbles}.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=8cm]{thimbles.pdf}
\end{center}
\caption{Saddle-point analysis of the integral (\ref{lamint}) for different values of $\kappa$. The red dot on the left is the critical point $u^{\rm L}$, while the
black point on the right is the critical point $u^{\rm R}$. The continuous lines represent paths of steepest descent, while the dashed lines are paths of steepest ascent.}
\label{thimbles}
\end{figure}
Let us now study what happens as we change the angle $\kappa$. For
\begin{equation}
|\kappa| < {2\pi \over 3}
\end{equation}
the path $\gamma_1$ can be deformed into a path of steepest descent through the saddle point at $u^{\rm L}$ (for $\kappa=0, \pi/3$, the steepest descent paths are shown in
\figref{thimbles}.) We therefore have
\begin{equation}
\label{oneint}
{\rm Ai}(x)=I_{\gamma_L^\kappa}, \qquad |\kappa| < {2\pi \over 3},
\end{equation}
which leads to the asymptotics,
\begin{equation}
\label{haione}
{\rm Ai}(x) \sim Z_{\rm Ai}(x), \qquad |\kappa| < {2\pi \over 3}.
\end{equation}
When
\begin{equation}
|\kappa|=2\pi/3
\end{equation}
the steepest descent path coming from the saddle at $u^{\rm L}$ runs right into the other saddle point. At this angle we have
\begin{equation}
\label{eqim}
{\rm Im}(S_{\kappa}(u^L))={\rm Im}(S_{\kappa}(u^R)).
\end{equation}
Values of $\kappa$ for which this happens are called {\it Stokes lines}. This is the place where the second saddle might start contributing to the integral. In fact, for
\begin{equation}
{2\pi \over 3}<| \kappa|<\pi
\end{equation}
the contour $\gamma_1$ gets deformed into a steepest descent path passing through $u^L$ {\it together} with a steepest descent path
passing through $u^R$, and
\begin{equation}
\label{stokesints}
{\rm Ai}(x)=I_{\gamma_L^\kappa}+ I_{\gamma_R^\kappa}.
\end{equation}
However, in this range of $\kappa$ the saddle $u^R$ gives an exponentially suppressed contribution to the asymptotics. In classical
asymptotic analysis, subleading exponentials are not taken into account, and the asymptotics is still given by the contribution from $u^L$:
\begin{equation}
{\rm Ai}(x) \sim Z_{\rm Ai}(x), \qquad |\kappa| < \pi.
\end{equation}
However, when $x<0$, both saddles have the same real part
\begin{equation}
{\rm Re}(S_{\kappa}(u^L))={\rm Re}(S_{\kappa}(u^R)).
\end{equation}
A line where this occurs is called an {\it anti-Stokes line}. Therefore, both saddles contribute to the asymptotics, which is then given by a linear combination of the two trans-series
$Z_{\rm Ai}$ and $Z_{\rm Bi}$ (the precise combination can be obtained by a more detailed analysis, see \cite{miller}). One finally obtains an {\it oscillatory} asymptotics:
\begin{equation}
\label{oscila}
{\rm Ai}(x)\sim {|x|^{-1/4} \over {\sqrt{\pi}}} \cos \Bigl( {2\over 3} |x|^{3/2} -{\pi \over 4}\Bigr), \qquad x<0, \, \, |x| \rightarrow \infty.
\end{equation}
The fact that different asymptotic formulae hold on different directions for the same analytic function is called the {\it Stokes phenomenon}.
From the point of view of saddle-point analysis,
what is happening is that the saddle point which appeared on the Stokes line, at $\kappa=2\pi/3$, is no longer subdominant at $\kappa=\pi$, and it has to be included in the
asymptotics.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=6.5cm]{Airystokes.pdf}
\end{center}
\caption{Saddle-point analysis of the Airy function ${\rm Ai}(x)$. Full lines (in red) are Stokes lines, while
dashed lines (in blue) are anti-Stokes lines. On the Stokes lines $\kappa =\pm 2\pi/3$, a second saddle appears in the
integration contour. This saddle is subdominant when $2\pi/3\le |\kappa|<\pi$ and does not contribute to classical asymptotics. However, at $\kappa=\pi$, the saddle
is not subdominant anymore and leads to an oscillatory asymptotics.}
\label{airystokes}
\end{figure}
The saddle-point analysis of the Airy integral is summarized in \figref{airystokes}. There are Stokes lines at
\begin{equation}
\text{Stokes}: \qquad {\rm arg}(x)=0, \qquad {\rm arg}(x)=\pm {2 \pi \over 3},
\end{equation}
and anti--Stokes lines at
\begin{equation}
\text{anti--Stokes}: \qquad {\rm arg}(x)=\pi, \quad \pm {\pi \over 3}.
\end{equation}
In light of the example of the Airy function, we can now understand the meaning of the formal trans-series solutions to ODEs. ODEs have ``true" solutions which are, generically, meromorphic functions on the complex plane. The classical asymptotics of these solutions is given by particular trans-series solutions, i.e. by linear
combinations of formal solutions to the ODE. Due to the Stokes phenomenon, this combination changes as we change the angular sector where we study the asymptotics. Therefore,
formal trans-series solutions are the ``building blocks" for the asymptotics of actual solutions.
In the context of systems of ODEs, Stokes and anti--Stokes lines are defined as follows.
Let us consider our system in prepared form (\ref{matrices}). The directions where an exponential is purely oscillatory, i.e.
\begin{equation}
{\rm Re}(\lambda_i z)=0
\end{equation}
are called {\it anti-Stokes lines}. Along these directions, terms which used to be exponentially suppressed become of the same order as the leading term. This leads to an oscillatory asymptotic behavior. The directions where
\begin{equation}
\label{antistokes}
{\rm Im}(\lambda_i z)=0,
\end{equation}
are called {\it Stokes lines}. These are the directions where subleading exponentials start contributing to the asymptotics. If we consider the Airy equation in prepared form, the eigenvalues are $\pm 2/3$. In terms of the variable $z$ defined in (\ref{xairy}), the Stokes lines are at ${\rm arg}(z)=0, \pi$, while the anti--Stokes lines occur at
${\rm arg}(z)=\pm \pi/2$, which, when translated to the variable $x$, give the structure found above.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=3.5cm]{tritronquee.pdf}
\end{center}
\caption{The sectors of the $\kappa$-complex plane of opening $8\pi/5$ where the tritronqu\'ee
solutions $u_{-2}$, $u_{-1}$, $u_0$, $u_1$, $u_2$ are
represented by the formal series $u^{(0)}(\kappa)$. The dots in the remaining sector of opening $2\pi/5$ represent the infinite number of poles that the tritronqu\'ee
solutions have there.}
\label{tritronquee}
\end{figure}
\begin{example} {\it Painlev\'e I and its tritronqu\'ee solutions}.
The meaning of the formal power series solution to PI (\ref{asymp1}) can be clarified by looking at actual solutions of the Painlev\'e I ODE.
It can be shown (see for example \cite{painlevebook} and \cite{kapaev}) that there
exist five different genuine meromorphic solutions of (\ref{p1}) with the
asymptotic power expansion (\ref{asymp1}) as $z\to\infty$ in one of the sectors of the
$z$-complex plane of opening $8\pi/5$, see \figref{tritronquee}:
\begin{equation}
\label{u0_gen}
\begin{aligned}
u_0(z) &\sim u^{(0)}(z), \quad
\arg z\in\bigl(-\tfrac{6\pi}{5},\tfrac{2\pi}{5}\bigr),
\\
u_k(z) &= {\rm e}^{-{\rm i}\frac{8\pi}{5}k}u_0\bigl({\rm e}^{-{\rm i}\frac{4\pi}{5}k}z\bigr)
\sim u^{(0)}(z), \quad
\arg z\in\bigl(-\tfrac{6\pi}{5}+\tfrac{4\pi}{5}k,\tfrac{2\pi}{5}
+\tfrac{4\pi}{5}k\bigr), \quad k=1, \cdots, 4.
\end{aligned}
\end{equation}
Along the remaining sector of opening $2\pi/5$, the asymptotics involves elliptic functions and the solutions have an infinite number of poles.
\end{example}
\subsection{Beyond classical asymptotics: Borel resummation}
So far we have discussed formal power series and trans-series. These formal power series give asymptotic approximations of well-defined functions
which in the case of ODEs are their ``true" solutions. Our next question is: to what extent can we recover the original, ``non-perturbative" solution, from its
asymptotic representation? Since asymptotic series are divergent, the answer is not obvious.
In fact, various answers have been proposed to this question. The more traditional answer is to use the optimal truncation procedure discussed in section \ref{smallcorr}. This gives a reasonable approximation to the original function in some regions of the complex plane, but it typically becomes a bad one in other regions. A nice illustration is provided again by the Airy function. Let us
run the following numerical experiment, proposed by Berry in \cite{berry}. In optimal truncation, we approximate
\begin{equation}
\label{optimalapp}
{\rm Ai}(x) \approx {1\over 2 {\sqrt {\pi}} x^{1/4}} {\rm e}^{ -2/3 x^{3\over 2}} \sum_{n=0}^{N^*}
a_n x^{-3n/2},
\end{equation}
where
\begin{equation}
N^* = \Bigl[ 4/3 |x|^{3\over 2}\Bigr],
\end{equation}
and $[ \, ]$ denotes the integer part. Let us now plot the parametrized curve
\begin{equation}
({\rm Re}({\rm Ai}(|x|{\rm e}^{{\rm i} \kappa})), {\rm Im}({\rm Ai}(|x| {\rm e}^{{\rm i} \kappa}))), \qquad 0< \kappa < \pi,
\end{equation}
in the complex plane, for fixed $|x|=1.7171$, and let us compare it with the corresponding curve computed by using
the r.h.s. of (\ref{optimalapp}). The result is shown in \figref{seca}.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=7cm]{AiSA.pdf}
\end{center}
\caption{Parametrized plot of the real and imaginary parts of the exact Airy function (red) compared to the optimal truncation of the asymptotic approximation (blue). The thin green line signals the angle $\kappa =2 \pi/3$.}
\label{seca}
\end{figure}
The approximation is quite good in the region
\begin{equation}
0<{\rm arg}(x) < {2\pi \over 3}
\end{equation}
and it becomes worse and worse as we approach ${\rm arg}(x)=\pi$. The reason is simple: we are missing an exponentially small correction! This correction is born on the
Stokes line ${\rm arg}(x) = {2\pi \over 3}$ and becomes more and more important as we approach the anti-Stokes line, where it is of the same order as the term we are keeping. It is clear that, in order to reproduce the full function, we must find a way to incorporate these exponentially small corrections. Notice as well that, in optimal truncation, only a finite number of terms in the asymptotic expansion are actually used, and the remaining terms cannot be used to improve the estimate.
The most powerful way to go beyond optimal truncation and solve the above problems is probably the technique of {\it Borel resummation}. Let
\begin{equation}
\label{seriesone}
\varphi(z)=\sum_{n\ge 0} {a_n \over z^{n}},
\end{equation}
be a factorially divergent series with $a_n \sim n!$. Its {\it Borel transform} is defined by
\begin{equation}
\label{boreltrans}
\widehat \varphi (\zeta)=\sum_{n\ge 1} a_{n} {\zeta ^{n-1} \over (n-1)!}.
\end{equation}
This series typically defines a function which is analytic in a neighbourhood of the origin. If the resulting function can be analytically continued to a neighbourhood
of the positive real axis, in such a way that the Laplace transform
\begin{equation}
\label{borelr}
\int_0^{\infty} {\rm e}^{-z \zeta} \widehat \varphi (\zeta) \, {\rm d} \zeta
\end{equation}
converges in some region of the $z$-plane, then the series $\varphi(z)$ is said to be {\it Borel summable} in that
region. In that case,
\begin{equation}
s(\varphi)(z) =a_0 +\int_0^{\infty} {\rm e}^{-z \zeta} \widehat \varphi (\zeta) \, {\rm d} \zeta
\end{equation}
defines a function whose asymptotics
coincides with the original, divergent series $\varphi(z)$, and $s(\varphi)(z)$ is called the {\it Borel sum} of $\varphi(z)$.
\begin{remark} There are other, equivalent definitions of Borel transform in the literature. In most of the physics literature (like for example \cite{caliceti-report})
the Borel transform of the series (\ref{seriesone}) is defined as
\begin{equation}
B_\varphi(\zeta)= \sum_{n \ge 0} {a_n \over n!} \zeta^n
\end{equation}
and we have the relationship
\begin{equation}
\widehat \varphi (\zeta)= {{\rm d} B_\varphi(\zeta) \over {\rm d} \zeta}.
\end{equation}
\end{remark}
\begin{remark} Sometimes we want to perform the Borel resummation along an arbitrary direction in the complex plane, specified by an angle $\theta$.
It is then useful to introduce the generalized Borel
resummation
\begin{equation}
\label{thetaresiduum}
s_\theta (\varphi)(z) =a_0 +\int_0^{{\rm e}^{{\rm i} \theta} \infty} {\rm e}^{-z \zeta} \widehat \varphi (\zeta) \, {\rm d} \zeta.
\end{equation}
\end{remark}
A crucial issue in the analysis of Borel resummation is the location of the singularities of $\widehat \varphi(\zeta)$. It is easy to see that, if
\begin{equation}
a_n \sim A^{-n} n! ,
\end{equation}
then $\widehat \varphi (\zeta)$ is analytic in an open neighborhood of radius $A$ around $\zeta=0$. There is a singularity at $\zeta=A$, which can be a pole or a branch point. If the singularity is not on the positive real axis, the integral (\ref{borelr}) defining the Borel resummation is typically well defined and reconstructs the original function, see \figref{borelan}.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=4cm]{borelanalytic.pdf}
\end{center}
\caption{The Borel transform defines an analytic function in a neighbourhood of $\zeta=0$, of radius $\rho=A$. There is a singularity on the circle $|\zeta|=A$, shown here as a red point on the negative real axis. If we can analytically
continue this function to a neighbourhood of the positive real axis, and its Laplace transform exists, we say that the series is
Borel summable.}
\label{borelan}
\end{figure}
\begin{example} {\it Borel resummation and Euler's equation}. Let us consider Euler's equation (\ref{eulereq}) with $A=-1$. Then, the formal solution is the asymptotic series
\begin{equation}
\varphi (z) = \sum_{n\ge0} {(-1)^n n!\over z^{n+1}}.
\end{equation}
Its Borel transform is
\begin{equation}
\label{boreuler}
\widehat \varphi(\zeta)= \sum_{n=1}^{\infty} (-1)^{n-1} \zeta^{n-1} ={1\over 1+\zeta}.
\end{equation}
Since $\widehat \varphi(\zeta)$ has no singularities on the positive real axis, we can define the Borel resummation
\begin{equation}
s \left(\varphi\right)(z)=\int_0^{\infty} {\rm e}^{-z \zeta} {{\rm d} \zeta \over 1+ \zeta},
\end{equation}
which defines an analytic function in the region
\begin{equation}
{\rm Re}\, z>0
\end{equation}
and reconstructs a true solution to the original differential equation.
\end{example}
\begin{example} {\it Borel resummation and the $c=1$ string}. Let us consider the following asymptotic series,
\begin{equation}
\label{B-ser}
\varphi(z)=\sum_{n\ge 1} {B_{n+2} \over n (n+2)} z^{-n},
\end{equation}
where $B_n$ are the Bernoulli numbers. Since $B_{2k+1}=0$ for $k\ge 1$, only even powers of $z$ appear. This series appears often in string theory. It gives the
genus expansion of the $c=1$ string at the self-dual radius (see for example \cite{klebanov}), and it also appears in the asymptotic $1/N$ expansion of the
partition function of the Gaussian matrix model (see for example \cite{mmleshouches}). To see that the series is asymptotic, notice that
\begin{equation}
\label{bernoulli-as}
B_n =- 2 (-1)^{n/2} (2 \pi)^{-n} n! \zeta(n), \qquad n\ge 2 \text{ even}.
\end{equation}
The zeta function behaves at infinity as,
\begin{equation}
\zeta(n)\approx 1, \qquad n\rightarrow \infty,
\end{equation}
up to exponentially small corrections in $n$, so the series (\ref{B-ser}) is alternating and factorially divergent. Its Borel transform can be computed explicitly,
\begin{equation}
\widehat \varphi(\zeta)=\sum_{n\ge 1} {B_{n+2} \over (n+2) n!} \zeta^{n}={1\over \zeta^2} -{1\over 12} -{1\over 4} {\rm csch}^2\left({\zeta\over 2}\right).
\end{equation}
It has singularities along the imaginary axis, at the points $\zeta=2 \pi m {\rm i}$, $m=\pm 1, \pm 2, \cdots$. It has no singularities along the positive real axis,
and the integral of the Borel transform
\begin{equation}
s(\varphi)(z)=\int_0^\infty {\rm d} \zeta \, {\rm e}^{-z \zeta} \left[ {1\over \zeta^2} -{1\over 12} -{1\over 4} {\rm csch}^2\left({\zeta\over 2}\right)\right],
\end{equation}
gives the Borel resummation of the original series for $z>0$ (see \cite{ps} for more details on this and related examples).
\end{example}
\begin{example} {\it Borel resummation and the Airy function}. In the case of the Airy function we can proceed as follows. Let us define
\begin{equation}
\varphi_{\rm Ai}(z)= \sum_{n=0}^{\infty} {a_n \over z^n},
\end{equation}
where the coefficients $a_n$ are given in (\ref{anairy}). Its Borel transform can be explicitly computed as a hypergeometric function
\begin{equation}
\label{airybor}
\widehat \varphi_{\rm Ai}(\zeta) = -{5 \over 48} \, _2F_1 \Bigl( {7\over 6}, {11\over 6};2; -{3\zeta\over 4} \Bigr)
\end{equation}
and it has a branch point singularity at $\zeta= -4/3$. The Borel resummation,
\begin{equation}
s \left( \varphi_{\rm Ai}\right)(z)=a_0+ \int_0^{ \infty} \widehat \varphi_{\rm Ai}(\zeta) {\rm e}^{-z\zeta} {\rm d} \zeta
\end{equation}
is well-defined if
\begin{equation}
\label{rez}
{\rm Re}(z) \ge 0
\end{equation}
and it reconstructs the {\it full} Airy function in the region (\ref{rez}) (see for example \cite{delabaere}). We then have,
\begin{equation}
{\rm Ai}(x)= {1\over 2x^{1/4} {\sqrt{\pi}}} {\rm e}^{-2 x^{3/2}/3} s \left( \varphi_{\rm Ai}\right)(z), \qquad x=z^{2/3}.
\end{equation}
In terms of ${\rm arg}(x)$, this representation is valid as long as
\begin{equation}
\bigl| {\rm arg}\, x \bigr|<{\pi \over 3}.
\end{equation}
\end{example}
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=4cm]{lateral.pdf}
\end{center}
\caption{The paths ${\cal C}_{\pm}$ avoiding the singularities of the Borel transform from above (respectively, below).}
\label{lateralfig}
\end{figure}
Very often one encounters asymptotic series whose Borel transform has singularities on the positive real axis.
In this case one needs some prescription in the integral (\ref{borelr}) to avoid the singularities.
A standard procedure to do this is to consider {\it lateral Borel resummations}.
Let ${\cal C}_{\theta \pm}$ be a path going from $0$ to $+{\rm e}^{{\rm i} \theta} \infty$ and avoiding
the singularities of $\widehat \varphi(\zeta)$ on the direction with angle $\theta$ from above (resp. below). For $\theta=0$, we will simply write the paths as ${\cal C}_\pm$, and in this case they have the form shown in \figref{lateralfig}. The {\it lateral Borel resummations} are then defined as,
\begin{equation}
\label{lateralres}
s_{ \theta \pm} \left(\varphi\right)(z)= a_0+ \int_{{\cal C}_{ \theta\pm}} {\rm e}^{-z \zeta} \widehat \varphi(\zeta) {\rm d} \zeta,
\end{equation}
provided the integral is convergent. Notice that, even if the original series has real coefficients and we choose a direction along the real axis,
since the lateral Borel resummations are computed by integrals
along paths in the complex plane, they lead in general to complex-valued functions.
\begin{example} {\it Lateral Borel resummations and Euler's equation}. Let us consider the Borel transform
(\ref{boreuler}) along the negative real axis. Since there is a singularity at $\zeta=-1$, we are forced to perform lateral Borel
resummations:
\begin{equation}
s_{\pi \pm} \left(\varphi\right)(z)=\int_{{\cal C}_{\pi \pm} } {\rm e}^{-z \zeta} {{\rm d} \zeta \over 1+ \zeta}.
\end{equation}
These integrals give two different solutions to the original differential equation for $z<0$. Their difference can be computed as a residue,
\begin{equation}
\label{eulerjump}
s_{ \pi +} \left(\varphi\right)(z)-s_{ \pi-} \left(\varphi\right)(z)=\int_{{\cal C}_{\pi+} -{\cal C}_{\pi-}}{{\rm e}^{-z \zeta} \over 1+ \zeta} {\rm d} \zeta=
2 \pi {\rm i} \, {\rm Res}_{\zeta=-1} {{\rm e}^{-z \zeta} \over 1+ \zeta} =2\pi {\rm i} \, {\rm e}^z,
\end{equation}
and it is {\it exponentially small} along the negative real axis. In fact, it is the exponentially small term appearing in the trans-series solution (\ref{eulerts}). See \cite{ss} for
a detailed discussion of this example.
\end{example}
\begin{example} {\it Lateral Borel resummations for the Airy function}. Let us consider the Borel transform
(\ref{airybor}) along the negative real axis in the $z$ variable. In terms of the $x$ variable, this corresponds to ${\rm arg}(x)=2 \pi/3$, i.e. to a
Stokes line. Along this direction, the coefficients of the series defining the Airy function (\ref{fullai}) are no longer alternating, and this is the standard indication that the series is not Borel summable along such a direction. Indeed, since there is a singularity at $\zeta=-4/3$, we have to consider lateral Borel
resummations:
\begin{equation}
s_{\pi \pm} \left(\varphi_{\rm Ai} \right)(z)=a_0 + \int_{{\cal C}_{\pi \pm} } \widehat \varphi_{\rm Ai}(\zeta) {\rm e}^{-z \zeta} {\rm d} \zeta.
\end{equation}
We will now show that (see, for example, \cite{delabaere})
\begin{equation}
\label{airyjump}
s_{\pi +} \left(\varphi_{\rm Ai} \right)(z)-s_{\pi -} \left(\varphi_{\rm Ai} \right)(z)=-{\rm i} \, {\rm e}^{4z/3} s_{\pi} \left( \varphi_{\rm Bi}\right) (z)
\end{equation}
where
\begin{equation}
\varphi_{\rm Bi}(z)=\sum_{n=0}^{\infty} (-1)^n {a_n \over z^n}=\varphi_{\rm Ai}(-z).
\end{equation}
Notice that, since $\widehat \varphi_{\rm Bi}(\zeta)$ is analytic on the negative real axis, the above Borel
resummation is well-defined. The derivation of (\ref{airyjump}) goes as follows. By integrating by parts, changing
variables $\zeta=-x$, and using the explicit result (\ref{airybor}), we can write down the l.h.s. of (\ref{airyjump}) as
\begin{equation}
- z\int_0^\infty \left( ~_2F_1 \left( {1\over 6}, {5\over 6}, 1; {3x \over 4} + {\rm i} \epsilon \right)-
~_2F_1 \left( {1\over 6}, {5\over 6}, 1; {3x \over 4} - {\rm i} \epsilon \right) \right) {\rm e}^{z x } {\rm d} x.
\end{equation}
The discontinuity of the hypergeometric function vanishes unless $x\ge 4/3$, and for this range of $x$ it is given by
\begin{equation}
~_2F_1 \left( {1\over 6}, {5\over 6}, 1; {3x \over 4} + {\rm i} \epsilon \right)-
~_2F_1 \left( {1\over 6}, {5\over 6}, 1; {3x \over 4} - {\rm i} \epsilon \right)= {\rm i} ~_2F_1 \left( {1\over 6}, {5\over 6}, 1; 1-{3x \over 4} \right).
\end{equation}
After using this result and changing variables $x= u+4/3$, we find
\begin{equation}
s_{\pi +} \left(\varphi_{\rm Ai} \right)(z)-s_{\pi -} \left(\varphi_{\rm Ai} \right)(z)=-{\rm i} z {\rm e}^{4z/3} \int_0^\infty ~_2F_1 \left( {1\over 6}, {5\over 6}, 1; -{3u \over 4} \right){\rm e}^{z u} {\rm d} u.
\end{equation}
Integrating by parts again, we obtain (\ref{airyjump}). As in the previous example, the difference
of lateral resummations is given by a trans-series
solution. We will see in a moment that this is the way in which the Stokes phenomenon manifests itself in the context of
Borel resummations.
\end{example}
Let us now see in general how to apply Borel resummation to the study of ODEs. If ${\boldsymbol \varphi}_0(z)$ is a formal solution to an
ODE, its Borel resummations (or lateral Borel resummations)
will provide functions which solve the ODE and have the asymptotic behavior given by ${\boldsymbol \varphi}_0(z)$.
However, we can add exponentially small corrections
to this solution without changing the asymptotics. In general, the multi-parameter family
\begin{equation}
\label{ysols}
{\boldsymbol \varphi}_{\pm} (z; {\bf C}_\pm) =s_{\pm} \left( {\boldsymbol \varphi}_0\right)(z) +\sum_{{\bf k}} {\bf C}_{\pm}^{\bf k} z^{-{\bf k} \cdot {\boldsymbol \beta}} {\rm e}^{-{\boldsymbol \lambda} \cdot {\bf k} z} s_{\pm} \left( {\boldsymbol \varphi}_{\bf k}\right)(z),
\end{equation}
which is obtained by doing lateral Borel resummations on the formal trans-series solution (\ref{trans}),
is a good solution for sufficiently large $|z|$ which asymptotes to ${\boldsymbol \varphi}_0(z)$, provided the non-vanishing terms in the trans-series are such that
\begin{equation}
{\rm Re}({\boldsymbol \lambda} \cdot {\bf k} z)>0.
\end{equation}
The converse is also true: any solution to the ODE can be represented by such a
Borel-resummed trans-series for an appropriate choice of ${\bf C}$'s. This is one of the main consequences of \'Ecalle's theory of resurgence, see for example \cite{ss,approche,costin} for
detailed statements and proofs. We see that the main advantage of the Borel resummed version of asymptotic analysis is that {\it we can make sense of small exponentials}, i.e.
we can incorporate the information encoded in the
trans-series in a systematic way. This is not the case in classical asymptotics.
What is the interpretation of the Stokes phenomenon in the context of Borel resummation?
In classical asymptotics, Stokes lines indicate the appearance of small exponentials, as we have seen in the analysis of the Airy function: we pass from (\ref{oneint}) to (\ref{stokesints}). However, this jump is only noticed in the classical theory when we reach the anti-Stokes line.
Once we use Borel resummation, we can give a ``post-classical" version of the Stokes phenomenon. Let us consider a Stokes direction,
which we take for simplicity to be ${\rm arg}(z)=0$, corresponding to the eigenvalue $A=\lambda_1>0$. Along this direction, there are two families of solutions ${\boldsymbol \varphi}_{\pm} (z; {\bf C}_\pm)$, obtained by lateral summations from below and from above. By uniqueness we
should expect these two solutions to be related. Indeed, we have the relation
\begin{equation}
\label{stokesjump}
{\boldsymbol \varphi}_{+} (z; {\bf C}) ={\boldsymbol \varphi}_{-} (z; {\bf C}+ {\bf S}),
\end{equation}
where
\begin{equation}
{\bf S}=(S_1,0, \cdots, 0)
\end{equation}
is called the {\it Stokes parameter} associated to the Stokes line ${\rm arg}(z)=0$, and it is an imaginary number when the coefficients of the
trans-series are real. At leading order in the exponentially small parameter $\exp(-Az)$ we find
\begin{equation}
\label{diffstokes}
{\boldsymbol \varphi}_{0;+}(z)-{\boldsymbol \varphi}_{0;-}(z) \approx S_1 z^{-\beta_1} {\rm e}^{-A z} {\boldsymbol \varphi}_{(1,0,\cdots, 0);-}(z).
\end{equation}
The relation (\ref{stokesjump}) is the Borel-resummed version of the Stokes phenomenon. It says that the
coefficients of the trans-series solutions have {\it discontinuous jumps} along the Stokes direction. The results (\ref{eulerjump}) and (\ref{airyjump}) are two particular examples of this general result, in which the Stokes line is the negative real axis for the $z$ variable. The Stokes parameters in
these examples are $S=2 \pi {\rm i}$ for the Euler equation, and $S=-{\rm i}$
for the Airy function. For a pedagogical explanation of (\ref{stokesjump}) in the case of ODEs, see \cite{ss,approche}. The generalization to systems
of ODEs is presented in \cite{costin}.
The relationship (\ref{diffstokes}) has a very nice interpretation in terms of Borel transforms, which we will make explicit
for simplicity in the case of a first order ODE. In this case the vector ${\boldsymbol \beta}$ has one single entry which we take to be
$\beta=0$. Let us denote the perturbative solution by $\varphi_0 (z)$, with the form (\ref{seriesone}), and the first trans-series by
\begin{equation}
\label{phione}
\varphi_1(z)=\sum_{n\ge 0} { \varphi_{1,n} \over z^n}.
\end{equation}
The l.h.s. of (\ref{diffstokes}) can be written as
\begin{equation}
\label{intaround}
\int_\gamma {\rm d} \zeta \, {\rm e}^{-z \zeta} \widehat \varphi_0 (\zeta),
\end{equation}
where $\gamma$ is homotopic to the contour ${\cal C}_+ -{\cal C}_-$, and encircles the singularities of the Borel transform. Then, (\ref{diffstokes}) tells us that the structure of
$\widehat \varphi(\zeta)$ around the singularity at $\zeta=A$ is of the form
\begin{equation}
\label{resurg}
\widehat \varphi_0(A +\xi) = -S_1 \left( {\varphi_{1,0} \over 2 \pi {\rm i}\xi} + {\log(\xi) \over 2 \pi {\rm i}} \widehat \varphi_1(\xi)+ {\rm holomorphic} \right),
\end{equation}
where $\widehat \varphi_1(\zeta)$ is the Borel transform of (\ref{phione}). This is easy to check: the integral (\ref{intaround}) can be evaluated by using (\ref{resurg}). The first
term in (\ref{resurg}) gives the residue at the pole $\zeta=A$, namely
\begin{equation}
S_1 {\rm e}^{-A z} \varphi_{1,0},
\end{equation}
while the second term in (\ref{resurg}) gives the integral of the discontinuity of the log, namely
\begin{equation}
S_1 {\rm e}^{-A z} \int_0^{\infty} {\rm d} \xi \, {\rm e}^{-z\xi} \widehat \varphi_1(\xi) = S_1 {\rm e}^{-A z} \sum_{n\ge 1} { \varphi_{1,n} \over z^n}.
\end{equation}
We then reconstruct the l.h.s. of (\ref{diffstokes}).
The relationship (\ref{diffstokes}) is then telling us that the singular behavior of the Borel transform of the perturbative series is related to the first
instanton trans-series. This shows that ``perturbative" and ``non-perturbative" phenomena are intimately related, at least in this example. In the next subsection
we will see that this relationship
has a powerful corollary, namely it gives an asymptotic formula for the large order behavior of the coefficients of the perturbative series.
Equation (\ref{resurg}) is an example of the {\it resurgence relations} discovered by Jean \'Ecalle, and it forms the basis of the so-called ``alien calculus" of his theory (see \cite{cnp} for an introduction). In fact, (\ref{resurg}) is just the tip of the iceberg, since relations of this type connect all the formal power series in the trans-series, and not only the perturbative series and the first instanton.
\begin{example} In the case of the Airy function, one can use the relation (\ref{airyjump}) to derive the following formula for the Airy function along the negative real axis \cite{delabaere}
\begin{equation}
{\rm Ai}(x)= {1\over 2x^{1/4} {\sqrt{\pi}}} \biggl\{ {\rm e}^{-2 x^{3/2}/3} \, s_{\pi/2} \left( \varphi_{\rm Ai}\right)(z)+
{\rm i} \, {\rm e}^{2 x^{3/2}/3}\, s_{\pi/2} \left(\varphi_{\rm Bi}\right)(z)\biggr\}, \qquad {\rm Re}(x)<0,
\end{equation}
where $x=z^{2/3}$. This is the {\it exact} version of the oscillatory asymptotics given in (\ref{oscila}) and (\ref{stokesints}).
\end{example}
\begin{example} \label{p2hm} {\it Painlev\'e II and the Hastings--McLeod solution}. As an example of how to reconstruct a ``true" function from the asymptotics in the case of non-linear ODEs,
including exponentially small
corrections, we consider the example of Painlev\'e II, whose formal structure was discussed in Example \ref{exp2}. This equation has a Stokes line at ${\rm arg}(\kappa)=0$. The PII equation has a solution, called the {\it Hastings--McLeod solution}, $u_{\rm HM}(\kappa)$ which is uniquely characterized by the following properties:
\begin{enumerate}
\item It is real for real $\kappa$.
\item As $\kappa \rightarrow \infty$ it asymptotes
\begin{equation}
u_{\rm HM}(\kappa) \sim \kappa^{1/2}.
\end{equation}
%
\item As $\kappa \rightarrow -\infty$ it asymptotes
%
\begin{equation}
\label{minusas}
u_{\rm HM}(\kappa) \sim {\rm e}^{- 2 {\sqrt{2}} (-\kappa)^{3/2}/3}.
\end{equation}
\end{enumerate}
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=4cm]{HM.pdf}
\end{center}
\caption{The Hastings--McLeod solution to Painlev\'e II. As $\kappa \rightarrow \infty$ it asymptotes $\sqrt{\kappa}$, while as $\kappa \rightarrow -\infty$ it
decays exponentially as indicated in (\ref{minusas}).}
\label{HMfig}
\end{figure}
This solution of PII plays a crucial r\^ole in the Tracy--Widom law \cite{tw} and in the double-scaling limit of unitary matrix models \cite{cdm}. The Hastings--McLeod solution is shown in \figref{HMfig}. Since it is a solution to Painlev\'e II, at sufficiently large $\kappa$ one must be able to express it as the Borel resummation of the trans-series (\ref{p2trans}). The Borel transform has a singularity at $\zeta=A=4/3$, therefore we have to use lateral resummations along the positive real axis. The relation (\ref{stokesjump}) reads in this case,
%
\begin{equation}
\label{p2jump}
u_+(\kappa;C)= u_-(\kappa;C+ S)
\end{equation}
%
where the subscripts $\pm$ refer to the two lateral resummations of the full trans-series solution (\ref{p2trans}), and
%
\begin{equation}
S=-{{\rm i} \over {\sqrt{2 \pi}}}
\end{equation}
%
is the Stokes parameter. By using results on the non-linear Stokes phenomenon for the Painlev\'e II equation \cite{painlevebook} one can show that \cite{mmnp}
%
\begin{equation}
u_{\rm HM}(\kappa)=u_+(\kappa;-S/2).
\end{equation}
%
Notice that this solution is real. Complex conjugation changes the sign of $S$ and also exchanges the integrals along the contours
${\cal C}_+$ and ${\cal C}_-$, so we have
%
\begin{equation}
u_+(\kappa;-S/2)^*=u_-(\kappa;S/2)=u_+(\kappa; -S/2)
\end{equation}
%
where we used (\ref{p2jump}). The connection to Borel resummed formal solutions gives the correct ``semiclassical" content of the Hastings--McLeod solution, and one easily shows that
%
\begin{equation}
u_+(\kappa;-S/2)={1\over 2} (u^{(0)}_+ +u^{(0)}_-) - {1\over 8} S^2 (u^{(2)}_+ +u^{(2)}_-)
+\cdots
\end{equation}
%
\end{example}
The equation (\ref{stokesjump}) is very important conceptually, when interpreted from a physical point of view.
As we will show in detail in these lectures, the formal power series ${\boldsymbol \varphi}_0$
has often the interpretation of a ``perturbative" series, while the trans-series ${\boldsymbol \varphi}_{\bf k}$
have the interpretation of non-perturbative effects. The coefficients ${\bf C}$ give the
strength of these effects. But one clear implication of (\ref{stokesjump}) is that this strength is not well-defined unless we give a prescription to perform the Borel resummation
of the power series appearing in the formal solution. Indeed, different prescriptions lead to different coefficients, and in particular the upper and lower lateral resummations differ by a shift involving the Stokes parameters. Notice that there is a ``compensation" effect, in the sense that we can change simultaneously the resummation prescription and the strength of the non-perturbative effects so that the solution is unchanged. This is indeed the content of (\ref{stokesjump}). This phenomenon was noticed in the literature on renormalons in gauge theories (see for example \cite{grunberg} for a clear presentation), as well as in the study of instantons in QM \cite{zjj}, and is called in those contexts the {\it cancellation of non-perturbative ambiguities}.
\subsection{Non-perturbative effects and large order behavior}
An important consequence of the Borel resummation technique is a relationship between the asymptotic behavior of the coefficients of a perturbative series, and the first instanton or trans-series solution. This type of relationships were anticipated in \cite{dingle} and in the work on the large
order behavior of quantum perturbation theory \cite{largeorder}, and it will
be important in the following lectures. We have already seen in the examples of the Euler equation and the Airy function
that the ``action" appearing in the trans-series controls the large order behavior of the
perturbative coefficients. This is a general phenomenon. We will now give a heuristic derivation of an asymptotic formula for the coefficients, in the case of a first order ODE with
$\beta_1=0$ (see \cite{ck,gama} for details and rigorous proofs). We will also write down various generalizations of the result.
We will denote the ``perturbative" series and its Borel transform by
\begin{equation}
\varphi_0(z)=\sum_{n \ge 0} {a_{n+1} \over z^{n+1}}, \qquad \widehat \varphi_0(\zeta)= \sum_{n\ge0} {a_{n+1} \over n!} \zeta^n.
\end{equation}
Therefore
\begin{equation}
{a_{n+1} \over n!} ={1\over 2\pi {\rm i}} \oint_{{\cal C}_0} {\rm d} \zeta {\widehat \varphi_0(\zeta) \over \zeta^{n+1}}.
\end{equation}
where ${\cal C}_0$ is a contour around the origin. We know that $\widehat \varphi_0(\zeta)$ has a singularity at $\zeta=A$, and we can deform the contour ${\cal C}_0$ so as to enclose this
singularity. In general, there are singularities at other points with $|\zeta|>A$, but they give exponentially small
corrections as compared to what we are computing. We can then write
\begin{equation}
\label{per-np-odes}
{a_{n+1} \over n!} \sim {1\over 2\pi {\rm i}} \int_{\zeta=A} {\rm d} \zeta {\widehat \varphi_0(\zeta) \over \zeta^{n+1}},
\end{equation}
up to exponentially small corrections. We know the singularity structure of $\widehat \varphi_0(\zeta)$ near $\zeta=A$
thanks to the result (\ref{resurg}): there is a pole with residue $-S_1 \varphi_{1,0}/2\pi {\rm i} $, and a logarithmic discontinuity. Changing
variables $\zeta=A+\xi$, we obtain
\begin{equation}
{a_{n+1} \over n!} \sim {S_1\varphi_{1,0}\over 2\pi {\rm i}} \oint_{0} {1 \over \xi (A+\xi)^{n+1}}{{\rm d} \xi \over 2\pi {\rm i}}+ {S_1 \over 2\pi {\rm i}} \sum_{k\ge 1} {\varphi_{1,k} \over (k-1)!} \int_0^{\infty} {\xi^{k-1} \over (A + \xi)^{n+1} }{\rm d} \xi.
\end{equation}
Since
\begin{equation}
\int_0^{\infty} {\xi^{k-1} \over (A+ \xi)^{n+1} }{\rm d} \xi =A^{k-n-1} {\Gamma(k) \Gamma(n+1-k) \over \Gamma(n+1)}
\end{equation}
we have the following result for the all-order asymptotics of the coefficients:
\begin{equation}
\label{simpleas}
a_n \sim {S_1 \over 2\pi {\rm i}} \Gamma(n)A^{-n} \sum_{k\ge0} {\varphi_{1,k} A^k \over \prod_{i=1}^k (n-i)}.
\end{equation}
This is a beautiful result. It implies that the leading asymptotics of the original (``perturbative") series is encoded in the first trans-series (or ``one-instanton") solution (there are further, exponentially suppressed contributions associated to the higher singularities). Conversely,
all the information about the formal one-instanton series is encoded in the asymptotics of the perturbative series. Notice that the leading and next-to-leading order of the asymptotics is given by
\begin{equation}
a_n \sim n! A^{-n}
\end{equation}
as in the examples discussed above. However, (\ref{simpleas}) contains much more information. In particular, the Stokes parameter $S_1$ plays a crucial r\^ole in the
asymptotics, and in fact (\ref{simpleas}) provides a method to determine this parameter numerically in cases in which it is not known analytically.
Before considering generalizations of the above equation, let us work out in some
detail an interesting example which will illustrate most of the considerations of this first lecture.
\begin{example} {\it A Riccati equation}. Following \cite{ss,bonet}, let us consider the ODE
\begin{equation}
\label{sric}
{{\rm d} \varphi \over {\rm d} z}= \varphi -{1\over z} (b^{-}+b ^+ \varphi^2).
\end{equation}
Here, $b^\pm$ are real constants, and we assume that
\begin{equation}
\beta^2=-b^- b^+
\end{equation}
is positive. The ODE (\ref{sric}) generalizes the Euler equation (\ref{eulereq}), which is obtained (for $A=-1$) when $b^+=0$. Equation (\ref{sric}) is a particular case of the
so-called Riccati equation, which is characterized by being quadratic in the unknown function. It is easy to see that there is a formal solution of (\ref{sric}) around $z=\infty$ which is given by
\begin{equation}
\varphi_0(z)= b^- \sum_{n\ge 1} {a_n \over z^n},
\end{equation}
and the coefficients $a_k$ are obtained from the non-linear recursion
\begin{equation}
a_{k+1}=-k a_k -\beta^2\sum_{\ell=1}^{k-1} a_\ell a_{k-\ell}, \quad a_1=1.
\end{equation}
Explicitly, we find
\begin{equation}
\label{particularcase}
\varphi_0(z)=b^- \Bigl\{ {1\over z} -{1\over z^2} +{2-\beta^2 \over z^3} -{6 -5\beta^2 \over z^4}+ \cdots \Bigr\}.
\end{equation}
The Riccati ODE (\ref{sric}) has a full trans-series solution of the form
\begin{equation}
\varphi(z,C)= \sum_{n\ge 0} C^n {\rm e}^{nz} \varphi_n(z),
\end{equation}
therefore there is a series of ``multi-instantons" with action $nA$, and $A=-1$. The Stokes parameter for this ODE has been computed exactly in \cite{bonet}, and it is given by
\begin{equation}
S_1=-2 \pi {\rm i} b^- \sigma(\beta), \qquad \sigma(\beta)={\sin(\pi \beta) \over \pi \beta}.
\end{equation}
Using (\ref{simpleas}), we obtain the following asymptotics for the coefficients $a_n$:
\begin{equation}
a_n \sim \sigma(\beta) (-1)^{n-1} (n-1)!\left( 1+ {\cal O}\left({1\over n}\right) \right).
\end{equation}
This is easy to test numerically. To do that, one simply studies the asymptotic behaviour of the sequence
\begin{equation}
\label{snseq}
s_n=(-1)^{n-1} {a_n \over (n-1)!}
\end{equation}
which should converge towards the constant value $\sigma(\beta)$. However, the resulting convergence is quite slow. One can accelerate it by using
{\it Richardson transformations}. Let us assume that a sequence
$s_n$ has the asymptotics
\begin{equation}
\label{sequ}
s_n \sim \sum_{k=0}^{\infty} {c_k \over n^k}
\end{equation}
for $n$ large. Its $N$-th Richardson transform can be defined recursively by
\begin{equation}
\label{richardson}
\begin{aligned}
s^{(0)}_n&=s_n, \\
s^{(N)}_n&=s_{n+1}^{(N-1)} + {n \over N}(s_{n+1}^{(N-1)} -s_n^{(N-1)}),
\quad N\ge 1.
\end{aligned}
\end{equation}
The effect of this transformation is to remove subleading tails in
(\ref{sequ}), and
\begin{equation}
s^{(N)}_n \sim c_0 + {\cal O}\Bigl({1\over n^{N+1}}\Bigr).
\end{equation}
The values $s^{(N)}_n$ give numerical approximations to $c_0$,
and these
approximations become better as $N$, $n$ increase. Once a numerical
approximation to $c_0$ has been obtained, the value of $c_1$ can be estimated
by considering the sequence $n(s_n-c_0)$, and so on. In \figref{riccatif} we plot the original sequence $s_n$ and its first and second Richardson transforms, for $\beta=1/3$. The convergence towards
\begin{equation}
\sigma\left( {1\over 3}\right)\approx 0.826993...
\end{equation}
is quite fast, and $s^{(2)}_{250}$ gives an approximate value for this constant which agrees with the right value up to the seventh decimal digit.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=5cm]{riccati.pdf}
\end{center}
\caption{The sequence $s_n$ in (\ref{snseq}) (top) and its first and second Richardson transforms (bottom).}
\label{riccatif}
\end{figure}
\end{example}
The asymptotic result (\ref{simpleas}) has many generalizations. For example, when $\beta\not=0$, and the trans-series have the structure
\begin{equation}
\label{trans-beta}
\varphi_k (z)=z^{-\beta k} \sum_{n=0}^{\infty} \varphi_{k,n} z^{-n},
\end{equation}
a simple generalization of the above argument shows that
\begin{equation}
\label{largeanbeta}
a_n \sim {S_1 \over 2\pi {\rm i}} \Gamma(n-\beta) A^{-n+\beta} \sum_{k\ge0} {\varphi_{1,k} A^k \over \prod_{i=1}^k (n-\beta-i)}.
\end{equation}
One can also generalize the argument to higher order ODEs. In that case, as seen in (\ref{ysols}), there are various possible instanton actions for the trans-series. The asymptotics is governed by the
trans-series which correspond to the smallest actions, in absolute value. If there are instanton actions which have the same smallest absolute value but different phases, the asymptotics is obtained by adding their different contributions, see \cite{ck} for a precise mathematical statement. For example, for the coefficients $u_{0,n}$ of the perturbative solution (\ref{asymp1}) of PI, one has the following asymptotics \cite{kapaev,jk}
\begin{equation}
u_{0,n}
\sim A^{-2n+{1\over 2}} \Gamma\Bigl(2n-{1\over 2} \Bigr)\,
{S_1 \over \pi {\rm i}} \biggl\{1 + \sum_{l=1}^{\infty} {u_{1,l} A^{l}
\over \prod_{k=1}^{l} (2n-1/2 -k)} \biggr\},
\end{equation}
which comes from two instanton actions $\pm A$. In this expression, $u_{1,l}$ are the coefficients of the $1$-instanton
series $\epsilon^{(1)}(\kappa)$ appearing in (\ref{el}), $A$ is the instanton action (\ref{aaction}), and
\begin{equation}
S_1 = -{\rm i} {3^{1\over 4} \over 2{\sqrt{\pi}}}
\end{equation}
is a Stokes parameter.
Since the trans-series solutions are also formal, asymptotic series, one can ask what is the asymptotic behavior of their coefficients.
In the same way that the asymptotics of the perturbative coefficients is encoded in the first trans-series, it turns out that the asymptotics of the coefficients of a given trans-series is encoded in higher trans-series solutions. The study of this question requires the full machinery of resurgent analysis, see
\cite{gama,gikm,asv} for various results along this direction.
\subsection{Lessons}
We can now summarize some of the results that can be learned from the study of asymptotic series appearing in ODEs. All of these results will have a counterpart when we look at
instanton corrections in quantum theories and string theories, and therefore they constitute a sort of ``r\^ole model" for their study.
\begin{enumerate}
\item The standard perturbative contribution (${\bf y}_0(x)$, in the context of ODEs) is a {\it factorially divergent, asymptotic series}. At the formal level, one
can also obtain trans-series solutions. These will correspond to perturbation theory around instanton solutions, i.e. to {\it non-perturbative effects}.
\item The weights of the instanton solutions are {\it a priori} undetermined. Therefore, the general trans-series solution gives a multi-parameter family of
formal solutions. This is the {\it non-perturbative ambiguity}.
\item By Borel resummation, the family of formal solutions becomes a family of ``true" solutions. Therefore, we obtain
a family of {\it non-perturbative completions}.
If we have a {\it non-perturbative definition}
of the theory we can fix the non-perturbative ambiguity by choosing the values of the parameters that reproduce the non-perturbative definition. Along directions where the series is Borel summable, the original solution is typically reconstructed by Borel resummation of the perturbative series solely.
\item Along Stokes lines there are different prescriptions for resummation, due to singularities in the Borel transform. Their difference is purely non-perturbative and defines the Stokes parameter. This is the ``resurgent" version of the Stokes phenomenon. The reconstruction of the non-perturbative solution involves in a crucial way the Borel-resummed non-perturbative effects, as we showed in Example \ref{p2hm} for the
Hastings--McLeod solution of PII.
\item The large order behavior of the perturbative expansion along a Stokes line encodes the action of the instanton, the Stokes parameter, and the coefficients of the
first instanton correction. This relation is extremely powerful, since it says that the large order behavior of perturbation theory knows about non-perturbative corrections.
In cases where there is no clear technique (or even framework!) to address the computation of non-perturbative effects, the large order behavior of the perturbative series gives an important hint about
their structure.
\end{enumerate}
\sectiono{Non-perturbative effects in Quantum Mechanics and Quantum Field Theory}
\subsection{Trans-series in Quantum Mechanics}
Before discussing non-perturbative effects in QFT and matrix models,
it is instructive to first consider simple quantum-mechanical examples, where the analysis of non-perturbative effects can be made in detail. We will focus on the ground state energy of
one-dimensional particles with Hamiltonian
\begin{equation}
H={p^2 \over 2} +V(q).
\end{equation}
We will assume that the potential $V(q)$ is of the form
%
\begin{equation}
\label{wint}
V(q)={1 \over 2} q^2 + g V_{\rm int}(q),
\end{equation}
%
where $g V_{\rm int}(q)$ is the interaction term. A typical example is the quantum anharmonic oscillator, where
\begin{equation}
V(q) ={q^2 \over 2} + {g \over 4} q^4.
\end{equation}
The ground
state energy of a quantum mechanical system in a potential $V(q)$ can be computed in a variety of ways. The most elementary method is of course
Rayleigh--Schr\"odinger perturbation theory, and the resulting series has the form
\begin{equation}
\label{gsseries}
E= \sum_{n=0}^\infty a_n g^n,
\end{equation}
where (in units where $\hbar=1$)
\begin{equation}
a_0={1\over 2}
\end{equation}
is the ground state energy of the harmonic oscillator, and we have an infinite series of corrections due to the interaction term in (\ref{wint}).
In order to make contact with QFT it is instructive to calculate the series (\ref{gsseries}) in terms of diagrams.
To do this, we first notice that the ground state energy can be extracted from the small temperature behavior of the
thermal partition function,
\begin{equation}
Z(\beta) = {\rm tr} \, {\rm e}^{-\beta H},
\end{equation}
as
\begin{equation}
\label{grounde}
E= -\lim_{\beta \to \infty} {1\over \beta } \log \, Z(\beta).
\end{equation}
In the path integral formulation one has
\begin{equation}
\label{pathintegral}
Z(\beta) =\int \, {\cal D}[q(t)] {\rm e}^{-S(q)},
\end{equation}
where $S(q)$ is the action of the Euclidean theory,
\begin{equation}
S(q)=\int_{-\beta/2}^{\beta/2} {\rm d} t \, \biggl[ {1\over 2} (\dot q (t))^2 + V(q(t))\biggr],
\end{equation}
and the path integral is over periodic trajectories
\begin{equation}
\label{periodic}
q(-\beta/2)=q(\beta/2).
\end{equation}
The path integral defining $Z$ can be computed in standard Feynman perturbation theory by expanding in $V_{\rm int}(q)$. We will actually work in the limit in which
$\beta \rightarrow \infty$, since in this limit many features are simpler, like for example the form of the propagator. In this limit, the free energy will be given by $\beta$ times a $\beta$-independent constant, as follows from (\ref{grounde}). In order to extract the ground state
energy we have to take into account the following facts:
%
\begin{enumerate}
\item Since we have to consider $F(\beta)=\log Z(\beta)$, only connected bubble diagrams contribute.
\item The standard Feynman rules in position space
will lead to $n$ integrations, where $n$ is the number of vertices in the diagram. One of these integrations just gives as an overall factor the ``volume" of spacetime, i.e. the factor $\beta$ that we just mentioned. Therefore, in order to extract $E(g)$ we can just perform $n-1$ integrations over ${\mathbb R}$.
\end{enumerate}
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=5cm]{QMfeynman.pdf}
\end{center}
\caption{Feynman rules for the quantum mechanical quartic oscillator.}
\label{qmf}
\end{figure}
For $\beta \rightarrow \infty$ the propagator of this one-dimensional field theory is simply
%
\begin{equation}
\int {{\rm d} p \over 2 \pi} {{\rm e}^{{\rm i} p \tau}\over p^2 + 1}={{\rm e}^{- |\tau|} \over 2}.
\end{equation}
%
For a theory with a quartic interaction (i.e. the anharmonic quartic oscillator)
%
\begin{equation}
V_{\rm int}(q)={1 \over 4} q^4
\end{equation}
%
the Feynman rules are illustrated in \figref{qmf}. One can use these rules to compute the perturbation series of the ground energy of
the quartic oscillator (see Appendix B of \cite{oldbw} for some additional details). We have, schematically,
%
\begin{equation}
a_n =\sum \left( {\text{connected vacuum bubbles}}\right).
\end{equation}
For example, the diagrams contributing up to order $g^3$ are shown in
\figref{feynmanquartic}, and after performing the integrals over the propagators one finds,
%
\begin{equation}
E={1\over 2} +{3\over 4} \Bigl( {g \over 4} \Bigr) -{21\over 8} \Bigl( {g \over 4} \Bigr)^2 + {333 \over 16} \Bigl( {g \over 4} \Bigr)^3 +{\cal O}(g^4),
\end{equation}
which agrees with the result of standard Rayleigh--Schr\"odinger perturbation theory.
A basic property of the above perturbative series is that the coefficients $a_n$ grow {\it factorially} when $n$ is large.
Moreover, this behavior is due to the {\it factorial growth in the number of diagrams} (the Feynman integrals over products of propagators only grow exponentially).
To see this, remember that $a_n$ can be computed as a sum over connected quartic graphs. The total number of connected graphs with $n$ quartic vertices is given by
\begin{equation}
{1\over n!} \langle (x^4)^n \rangle ^{(c)},
\end{equation}
where
\begin{equation}
\langle (x^4)^n \rangle={\int_{-\infty}^{\infty} {\rm d} x \, {\rm e}^{-x^2/2} x^{4n} \over \int_{-\infty}^{\infty} {\rm d} x \, {\rm e}^{-x^2/2}}
\end{equation}
%
is the Gaussian average. By Wick's theorem, the average
counts all possible pairings among $n$ four-vertices. The superscript $(c)$ means that we take the connected part of the average. Since
%
\begin{equation}
\label{simplewick}
\langle x^{2k} \rangle=(2k-1)!! ={(2k)! \over 2^k k!}
\end{equation}
%
we find
%
\begin{equation}
\label{x4corr}
{1\over n!} \langle (x^4)^n \rangle = {(4n-1)!!\over n!}= {(4n)! \over 4^n n! (2n)!}.
\end{equation}
%
As $n \rightarrow \infty$ this behaves like
\begin{equation}
4^{2n} n!,
\end{equation}
i.e. there is a factorial growth in the number of disconnected diagrams. One could think that there might be a substantial reduction in this number
when we consider connected diagrams, but a careful analysis \cite{bendercaswell} shows that this is not the case: at large $n$, the quotient of the number of
connected and disconnected diagrams differs from $1$ only in ${\cal O}(1/n)$ corrections. We conclude that there are $\sim n!$ diagrams that contribute
to $a_n$. The resulting factorial behavior of the perturbative series of the quartic oscillator can be verified by a detailed consideration of Feynman diagrams \cite{bwstat} (see \cite{bender} for a review of these early developments). Therefore, we conclude that the perturbative series for the ground state energy is a formal, divergent power series. This series gives at best an asymptotic expansion of the true non-perturbative ground-state energy, defined in terms of the exact spectrum of the Schr\"odinger operator.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=8cm]{feynmanquartic.pdf}
\end{center}
\caption{Feynman diagrams contributing to the ground state energy of the
quartic oscillator up to order $g^3$. }
\label{feynmanquartic}
\end{figure}
We can now ask what is the analogue of the trans-series for this type of problems. As it is well-known (see for example the discussion in the textbook \cite{zj}),
there are instanton contributions to the thermal partition function. For concreteness, let us consider again the quartic oscillator
and let us suppose that the coupling constant is negative, i.e. $g=-\lambda$, with $\lambda>0$,
so that we have a potential of the form
shown in the left hand side of \figref{qinst}. In this case, the Euclidean action has non-trivial saddle-points. The EOM reads
\begin{equation}
\label{aneom}
-\ddot q (t) + q(t) -\lambda q^3(t) =0.
\end{equation}
In the limit $\beta \rightarrow \infty$ one finds the following trajectory with $E=0$,
\begin{equation}
\label{qcsaddle}
q_c(t) =\pm \Bigl( {2\over \lambda}\Bigr)^{1\over 2} {1\over \cosh (t-t_0)},
\end{equation}
where $t_0$ is an integration constant or modulus of the solution. When $\beta \rightarrow \infty$, such a trajectory starts at the origin in the infinite past,
reaches the zero of the potential $V(q)$ at $t=t_0$, and returns to the origin in the infinite future.
As is well-known, the Euclidean action can be regarded as an action in Lagrangian mechanics with an ``inverted" potential $-V(q)$,
and the non-trivial saddle-point described above is simply a trajectory of zero energy in this inverted potential.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=4cm]{quarticinstanton.pdf}
\end{center}
\caption{The inverted potential relevant for instanton calculus in the quartic case. The
instanton or bounce configuration $q_c(t)$ leaves the origin at $t=-\infty$, reaches the zero $(2/\lambda)^{1\over2}$ at $t=t_0$,
and comes back to the origin at $t=\infty$. }
\label{qinst}
\end{figure}
The partition function around this non-trivial saddle can be computed at one-loop by
using standard techniques, which we will not review here (a very complete and updated discussion can be found in chapter 39 of \cite{zj}).
It can be seen that this saddle-point is unstable: it has one, and exactly one, negative mode. This means that the instanton contribution is imaginary.
A detailed analysis, which can be found in for example \cite{cs,zj}, shows that this one-instanton calculation determines the
{\it discontinuity} of the partition function for negative values of the coupling:
\begin{equation}
\label{reson}
{\rm disc}\, Z(-\lambda)=Z(-\lambda + {\rm i} \epsilon)-Z(-\lambda -{\rm i} \epsilon) =2 {\rm i} \,
{\rm Im}\, Z(-\lambda).
\end{equation}
This leads to a discontinuity in the ground-state energy, as a function of the coupling. In the case of the quartic oscillator one finds, at one loop,
\begin{equation}
\label{corrE}
{\rm disc}\, E(-\lambda)=2 {\rm i} \,
{\rm Im}\, E(-\lambda)\approx {8 {\rm i} \over {\sqrt {2 \pi \lambda}}} {\rm e}^{-{A/\lambda}},
\end{equation}
where
\begin{equation}
\label{bounceaction}
A={4\over 3}
\end{equation}
is the action of the saddle (\ref{qcsaddle}) for $\lambda=1$.
This imaginary correction to the energy has a clear physical interpretation: since for negative coupling the potential is unstable, a
particle in its ground state will eventually tunnel. The width of the ground state energy
\begin{equation}
\Gamma=2|{\rm Im} \, E|,
\end{equation}
is inversely proportional to the life-time of the ground state.
The above calculation is just the one-loop approximation to the one-instanton sector. But if we consider multi-instanton expansions at all loops, we expect
to find for the ground state energy a trans-series structure of the form
\begin{equation}
E(g) =\sum_{\ell=0}^{\infty} C^{\ell} E^{(\ell)}(g),
\end{equation}
where $E^{(0)}$ is the asymptotic, perturbative series (\ref{gsseries}), and
\begin{equation}
E^{(\ell)}(g)\propto {\rm e}^{-\ell A/g}
\end{equation}
are the $\ell$-instanton corrections, themselves asymptotic expansions in the coupling constant $z$. In the case of the quartic oscillator they have the structure
\begin{equation}
E^{(\ell)}(-z)=z^{\ell \beta} {\rm e}^{-\ell A/z}\sum_{n=0}^\infty a_{\ell, n} z^n,
\end{equation}
which is identical to (\ref{trans-beta}) (here the expansion is around $z=0$, while in (\ref{trans-beta}) we do the expansion around $z=\infty$).
In other cases, like the double-well potential analyzed in \cite{zj-first,zj,zjj} they are more complicated and include terms of the form $z^n \log z$.
The structure of the trans-series in QM suggests that the instanton action determines the positions of the singularities of the Borel
transform, and that the large order behavior of the coefficients in the perturbative series (\ref{gsseries}) is controlled by the first instanton contribution. These expectations are indeed true, and one can show that much of the structure appearing in ODEs can be extended to the analysis of quantum-mechanical potentials in one dimension. In particular, the ``resurgent'' structure of the formal trans-series calculating the energies of bound states in
QM has been established, following the work of Voros \cite{voros}, in \cite{delabaerepham}.
In the case of the quartic oscillator, the Borel transform of the formal power series (\ref{gsseries}) has a singularity at $\zeta=-4/3$.
It is therefore Borel-summable along the positive real axis (i.e. for $g>0$), and its Borel resummation is indeed the exact ground-state energy,
as defined by the Schr\"odinger operator.
This was originally proved in \cite{ggs}, and a proof using the theory of resurgence can be found in \cite{delabaerepham}.
To understand the large order behavior of the series (\ref{gsseries}) we have to take into account the presence of the instanton at negative
$g=-\lambda<0$. In this case, one has to consider lateral resummations of $E(g)$ along the negative real axis. The discontinuity ${\rm disc}\, E(-\lambda)$ gives then the difference between lateral Borel resummations, and the result (\ref{corrE}) can be interpreted as the analogue of (\ref{diffstokes}) in the theory of ODEs: the asymptotic expansion of this difference is
given (at leading order) by the first instanton correction to the energy, which has the general structure
\begin{equation}
s_+ (E)(-z)-s_-(E)(-z) \approx S_1 E^{(1)}(z),
\end{equation}
where $S_1$ is a Stokes parameter. At one-loop we have the asymptotic result,
\begin{equation}
s_+ (E)(-z)-s_-(E)(-z) \approx {8 {\rm i} \over {\sqrt {2 \pi z}}} {\rm e}^{-{4\over 3 z}}.
\end{equation}
Notice, in particular, that the coefficient of the one-loop calculation of the instanton partition function gives the Stokes parameter of the problem.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=5cm]{quotients.pdf}
\end{center}
\caption{The bottom line shows the joined points of the sequence $Q_n$, which is defined in (\ref{qn}) from the coefficients $a_n$ of the perturbative series of the ground state energy for the quartic oscillator. The top line is its first Richardson transform, with accelerated convergence to the expected value $1$. }
\label{rt-quartic}
\end{figure}
As in the theory of ODEs, one can use this result to derive the large order behavior of the coefficients $a_n$ in (\ref{gsseries}). Writing
\begin{equation}
\label{ol-qm}
S_1 E^{(1)}(-z)={\rm i} z^{ \beta} {\rm e}^{-A/z}\sum_{n=0}^\infty c_n z^n,
\end{equation}
we find the asymptotic growth
\begin{equation}
\label{akgen}
a_n \sim { (-1)^{n+1}A^{-n+\beta} \over 2 \pi} \Gamma(n-\beta) \left\{ c_0 + \sum_{l=1}^\infty{ c_{l+1} A^l \over \prod_{m=1}^l (n-\beta-m)} \right\}.
\end{equation}
This formula is exactly like the one in (\ref{largeanbeta}), with the only difference of the extra factor $(-1)^{n+1}$ which is due to the fact that we do the perturbative expansion in the variable $g=-z$. Plugging in the concrete values of the quartic oscillator for the different quantities, i.e.
\begin{equation}
\beta=-{1\over 2}, \quad c_0=4 {\sqrt {2 \over \pi}}, \quad A=4/3,
\end{equation}
we find for the large-order behavior
\begin{equation}
\label{famousbw}
a_n \sim (-1)^{n+1}{ {\sqrt 6} \over \pi^{3/ 2}} \Bigl( {3 \over 4} \Bigr)^n \Gamma\Bigl(n +{1\over 2}\Bigr).
\end{equation}
This can be tested against an explicit study of the behavior of the coefficients $a_n$ as $n$ grows large. These coefficients can be computed explicitly for large values of $n$
by using a recursion relation found in \cite{oldbw}. In \figref{rt-quartic} we plot the quotient
\begin{equation}
\label{qn}
Q_n= (-1)^{n+1}{\pi^{3/ 2} \over {\sqrt 6}} \left( {3 \over 4} \right)^{-n} { a_n \over \Gamma\left(n +{1\over 2}\right)}
\end{equation}
which should behave, at large $n$, as
\begin{equation}
Q_n =1 + {\cal O}\left({1\over n}\right).
\end{equation}
We also plot its first Richardson transform to eliminate subleading tails. The ``prediction'' (\ref{famousbw}) is indeed verified experimentally.
The story of the famous result (\ref{famousbw}) is a fascinating chapter of modern mathematical physics
(see \cite{simonreview}). The behavior of the $a_n$ at large $n$
was first obtained by Bender and Wu in \cite{oldbw}
by studying numerically the sequence of the first seventy-five coefficients.
They were even able to guess, from these numerical experiments, the exact form of the prefactor in (\ref{famousbw}).
In a subsequent (and classic) paper \cite{bw},
they showed that the result (\ref{famousbw}) could be derived analytically by looking at the one-instanton sector.
In the case of the quartic oscillator with positive coupling $g$, the origin leads to a stable quantum-mechanical ground state. The instanton solution only appears
when one inverts the sign of the coupling, and as a result the perturbative series for positive $g$ is alternating and Borel summable. In other cases, like for
example the cubic oscillator
\begin{equation}
V(x)={1\over 2} x^2 -g x^3,
\end{equation}
the origin is always unstable quantum-mechanically for any real value of the coupling constant $g$. An elementary calculation shows that the action of the instanton mediating the tunneling is
\begin{equation}
\label{cubicaction}
S_c= {A \over g^2}, \qquad A={2\over 15}.
\end{equation}
The large order behavior of the coefficients in the formal power series for the ground state energy
\begin{equation}
E(g)=\sum_{n=0}^\infty a_n g^{2n}
\end{equation}
can be computed by using a small modification of (\ref{akgen})---essentially, one has to consider the discontinuity along the positive real axis of $g^2$, and then there is no alternating sign factor. Therefore,
\begin{equation}
a_n \sim A^{-n} \Gamma\left( n +1/2\right)
\end{equation}
where $A$ is given in (\ref{cubicaction}), see for example \cite{alvarezcubic} for a derivation of this result.
In this case the series is not Borel summable, reflecting the instability of the perturbative ground state.
In general, in one-dimensional quantum-mechanical problems, we will have {\it complex} instanton solutions with complex actions. They lead to perturbative series
which are Borel summable and have an oscillatory character. As we mentioned in the case of ODEs, the large order behavior is controlled by
the instantons with the smallest action in absolute value, and the phase of the action determines the oscillation period of the series.
Let us analyze in some detail a very instructive example, following \cite{blgzj} (a useful discussion can also be found in chapter 42 of \cite{zj}). Let us consider a particle situated at the origin of the potential
\begin{equation}
\label{gammapot}
{1\over g^2} V(gx) , \qquad V(x)={1\over 2} x^2 -\gamma x^3 +{1\over 2} x^4.
\end{equation}
The ground state energy has the expansion
\begin{equation}
E(g)=\sum_{n\ge 0} a_n g^{2n}.
\end{equation}
In this example there are two different situations (see \figref{gammapotplots}):
\begin{enumerate}
\item For $|\gamma| >1$, the origin is not an absolute minimum, which is in fact at
\begin{equation}
x_0=\frac{3\,\gamma+ {\sqrt{-8 + 9\,\gamma^2}}}{4}.
\end{equation}
\item For $|\gamma|<1$, the origin is the absolute minimum.
\end{enumerate}
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=4cm]{stable.pdf} \qquad \qquad \includegraphics[height=4cm]{unstable.pdf}
\end{center}
\caption{On the left: the potential $V(x)$ in (\ref{gammapot}) for $\gamma=0.95$, where the origin is the absolute minimum. On the right: the potential (\ref{gammapot}) for $\gamma=1.01$; the origin is now unstable to quantum tunneling.}
\label{gammapotplots}
\end{figure}
In the first case $|\gamma| >1$, the vacuum located at the origin is quantum-mechanically unstable, and there is a real instanton given by a trajectory from $x=0$ to the
turning point
\begin{equation}
x_+=\gamma-{\sqrt{\gamma^2-1}}.
\end{equation}
The action of this instanton can be written as
\begin{equation}
S_c={A\over g^2}, \qquad A=2 \int_0^{x_+} {\rm d} x \, (2 V(x))^{1\over 2} =-{2\over 3} +\gamma^2 -{1\over 2} \gamma(\gamma^2-1)\log {\gamma +1\over\gamma-1}.
\end{equation}
The one-loop prefactor for this instanton, appearing in (\ref{ol-qm}), is given by
\begin{equation}
c_0= {2\over \pi^{1/2}}(\gamma^2-1)^{-1/2}.
\end{equation}
The behavior when $|\gamma|<1$ is obtained by analytic continuation of this instanton configuration, which is
now complex. In fact, there are two complex conjugate instantons described by a particle which goes from $x=0$ to
\begin{equation}
x=\gamma \pm {\rm i} {\sqrt{1-\gamma^2}}.
\end{equation}
We have then to {\it add} the contributions of both instantons. Since $c_0$ becomes imaginary when $|\gamma|<1$, adding the complex-conjugate contributions of the two instantons gives
\begin{equation}
a_k \sim \Gamma(k+1/2) {\rm Im}\, A^{-k-1/2}.
\end{equation}
More generally, if we have a quantum-mechanical problem involving a complex instanton and its complex conjugate, and
\begin{equation}
A=|A| {\rm e}^{-{\rm i} \theta_A}, \qquad
c_0=|c_0| {\rm e}^{{\rm i} \theta_c},
\end{equation}
the large order behavior, obtained by adding the contribution of the two instantons, is oscillatory
\begin{equation}
a_k \sim \Gamma(k-\beta) |A|^{-k+\beta} \cos\left( (k-\beta)\theta_A + \theta_c\right).
\end{equation}
As in the case of ODEs analyzed in section \ref{ODEs}, when a perturbative series is Borel summable (like in the case of the quartic oscillator with $g>0$ or in the potentials with complex instantons), the Borel resummation of the perturbative series reconstructs the non-perturbative answer. There are two types of situations where there is no Borel summability: the first case corresponds to perturbative series around unstable minima, like the quartic oscillator with $g<0$ or the cubic oscillator. A different situation occurs in the case of the double-well potential. In that case, there is a stable ground state but the perturbative series is not Borel summable, and one has to consider lateral Borel resummations. The ground state energy can be reconstructed from the Borel-resummed perturbative series and the Borel-resummed instanton or trans-series solutions, in a way which is similar to the analysis of the Hastings--McLeod solution of Painlev\'e II in Example \ref{p2hm}, see \cite{zj-first,delabaerepham,zjj} for more details on this quantum-mechanical problem.
\subsection{Non-perturbative effects in Chern--Simons theory}
\label{nonpert-cs}
We have seen that many of the structures found in the study of ODEs reappear in QM: perturbative series are asymptotic series,
and expansions around non-trivial saddle points or
instantons are the analogues of trans-series. In particular, the singularity structure of the Borel transform of the perturbative
series is governed by the non-trivial saddles. In principle, the extension of these ideas to QFT should be straightforward: the analogue of a trans-series would be
the perturbative expansion around instanton configurations, and one could think that these trans-series control the Borel transform of the perturbative series, and therefore
its large order behavior. However, the extension of the above ideas to realistic QFTs is plagued with serious difficulties. Probably,
the most important one is the fact that in renormalizable QFTs
there are other sources of factorial divergence in perturbative series, namely {\it renormalons} (see \cite{beneke}). Renormalons are particular types of
diagrams which diverge factorially due to the integration over momenta in the Feynman integral. Due to the existence of renormalons,
the analysis of the large order behavior of
perturbation theory inspired by QM does not extend straightforwardly to standard QFTs.
There are however QFTs where renormalon effects are absent, like Chern--Simons (CS) theory and many supersymmetric QFTs, and we will
focus here on these ``toy'' QFTs, and more particularly on CS theory, where many different aspects of non-perturbative effects are relatively well understood.
CS theory is a QFT defined by the action
\begin{equation}
S=-{k \over 4\pi} \int_M {\rm Tr} \Bigl( {\cal A}\wedge {\rm d} {\cal A} + {2 {\rm i} \over 3} {\cal A}
\wedge {\cal A} \wedge {\cal A} \Bigr).
\label{csact}
\end{equation}
Here, ${\cal A}$ is a $G$-connection on the three-manifold $M$, where $G$ is a gauge group. We will mostly consider $G=U(N)$, and in this case our conventions are such that ${\cal A}$ is a Hermitian $N \times N$ matrix-valued one-form. Gauge invariance of the action requires \cite{deser}
\begin{equation}
k \in {\mathbb Z}.
\end{equation}
The 3d QFT defined by this action is a remarkable one: it is exactly solvable,
yet highly nontrivial, and provides a QFT interpretation of quantum invariants of knots and three-manifolds \cite{wittencs}.
The partition function of the theory on $M$ is defined by the path integral
\begin{equation}
\label{zcs}
Z(M) =\int {\mathcal D}{\cal A}\, {\rm e}^{{\rm i} S({\cal A})}
\end{equation}
and in principle it can be computed by standard perturbative techniques (see \cite{mmcslectures} for a review).
The saddle-points of the CS action are just flat connections
\begin{equation}
F({\cal A})=0.
\end{equation}
These are in one-to-one correspondence with homomorphisms
\begin{equation}
\pi_1(M) \rightarrow G,
\end{equation}
where $G$ is the gauge group. In principle, the path integral (\ref{zcs}) has various contributions coming from
expansions around the different saddle-points. The perturbative sector is defined by expanding around the trivial flat connection
\begin{equation}
{\cal A}=0,
\end{equation}
while instanton sectors are associated to non-trivial flat connections. Formal expansions around these instanton sectors define the analogue of trans-series for this QFT.
\begin{example} {\it Lens spaces}. The lens space $L(p,1)$ has fundamental group $\pi_1(L(p,1)) ={\mathbb Z}_p$. The set of $U(N)$ flat connections is
given by homomorphisms
\begin{equation}
{\mathbb Z}_p \rightarrow U(N),
\end{equation}
modulo gauge transformations. These are in turn given by splittings of $U(N)$ into $p$ factors
\begin{equation}
\label{lens-split}
U(N) \rightarrow U(N_1) \times U(N_2) \times \cdots \times U(N_p),
\end{equation}
corresponding to the homomorphism
\begin{equation}
\xi \rightarrow {\rm diag}\left(\underbrace{1, \cdots, 1}_{N_1}, \underbrace{\xi, \cdots, \xi}_{N_2}, \cdots, \underbrace{\xi^{p-1}, \cdots, \xi^{p-1}}_{N_p} \right),
\end{equation}
where
\begin{equation}
\xi=\exp\left( {2 \pi {\rm i} \over p} \right).
\end{equation}
Therefore, instanton sectors are in one-to-one correspondence with partitions of $N$ into $p$ non-negative integers.
The CS action evaluated at the flat connection labelled by $\{ N_j\}_{j=1, \cdots, p}$ is given by
\begin{equation}
\label{csia}
A={\pi {\rm i} k \over p} \sum_{j=1}^p (j-1)^2 N_j.
\end{equation}
\end{example}
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=2.5cm]{ASrelation.pdf} \\ \vskip .75cm\includegraphics[height=2cm]{ihx.pdf}
\end{center}
\caption{AS and IHX relations. }
\label{rels}
\end{figure}
We can now ask what is the nature of the perturbative series appearing in CS theory. It turns out that, generically, perturbation theory around the trivial connection ${\cal A}=0$
is factorially divergent. The reason for this is the same as in QM, namely, the factorial growth in the number of diagrams. Let us see this in some detail. We will denote by
\begin{equation}
F(M, {\bf g}, g_s)
\end{equation}
the contribution of the trivial connection to
the free energy $\log \, Z$ of CS theory. Here, ${\bf g}$ is the Lie algebra associated to $G$, and
\begin{equation}
\label{gscs}
g_s={2\pi {\rm i} \over k}.
\end{equation}
Using standard perturbative techniques, it is easy to see that the free energy can be written as a formal power series of the form
\begin{equation}
\label{fM}
F(M, {\bf g}, g_s) =\sum_{n=1}^{\infty} \sum_{\Gamma \in {\cal A}^{(c)}_n(\emptyset)} c_\Gamma(M) W_{\bf g}(\Gamma) g_s^n.
\end{equation}
Let us spell out in detail the ingredients in this formula. We first construct the space of Feynman diagrams,
${\cal A}^{(c)}(\emptyset)$. This is the space of connected, trivalent diagrams with no
external legs (i.e. connected vacuum bubbles) modulo
the so-called IHX and AS relations, shown in \figref{rels}. It is graded by the {\it degree of the diagram} $n$, which is half the number of vertices (and
also equals the number of loops minus one):
\begin{equation}
{\cal A}^{(c)}(\emptyset) =\oplus_{n=1}^{\infty} {\cal A}^{(c)}_n(\emptyset).
\end{equation}
A basic fact is that, for each $n$, this space has finite dimension. The very first dimensions are listed in Table \ref{dims}.
\begin{table}[htbp]
\centering
\begin{tabular}{|c||c|c|c|c|c|c|c|c|c|c|}\hline
$n$ & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 \\ \hline
$d(n)$ & 1 & 1 & 1 & 2 & 2 & 3 & 4 & 5 & 6 & 8 \\
\hline
\end{tabular}
\caption{Dimensions $d(n)$ of ${\cal A}^{(c)}_n(\emptyset)$ up
to $n=10$.}
\label{dims}
\end{table}
An explicit choice of basis up to $n=5$ is shown below:
\begin{eqnarray}
\label{graphi}
n=1: & & \,\,\,\,\, \twoVgraph \nonumber\\
n=2: & & \,\,\,\,\, \fourVgraph \nonumber\\
n=3: & & \,\,\,\,\, \sixVgraph \nonumber\\
n=4: & & \,\,\,\,\, \eightVgraphI \,\,\,\,\,
\eightVgraphII \nonumber\\
n=5: & & \,\,\,\,\, \tenVgraphI \,\,\,\,\,
\tenVgraphII \nonumber\\
\end{eqnarray}
The second ingredient is the {\it weight system}. This is an instruction to produce a number for each diagram, given the data of a Lie algebra with structure constants and
a Killing form,
\begin{equation}
[T_a, T_b]=f_{abc}\, T_c.
\end{equation}
To each trivalent vertex we associate the structure constant $f_{abc}$ as shown in \figref{weight}. In QFT we call
this ``computing the group factor of the diagram $\Gamma$''.
The final ingredient is $c_\Gamma(M)$. It is simply given by the Feynman integral associated to the graph $\Gamma$. It is possible to show that each $c_\Gamma (M)$ is a
topological invariant of $M$. For example,
\begin{equation}
c_{\theta} (M)=\text{Casson invariant of $M$}.
\end{equation}
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=2cm]{weight.pdf} \end{center}
\caption{Weight system. }
\label{weight}
\end{figure}
Based on our experience with the quantum anharmonic oscillator, we should ask
how many diagrams we have at each loop order $n+1$. It has been shown by Garoufalidis and Le in \cite{gl} that
\begin{equation}
{\rm dim}\left( {\cal A}^{(c)}_n(\emptyset) \right) \sim n!, \qquad n\gg 1.
\end{equation}
Therefore, the series (\ref{fM}) will be factorially divergent. Interestingly, there is no other source of factorial divergence: in principle, such divergences could also come from the weight factors, or from the Feynman integrals. However, it is easy to see that the weight factors
can only grow exponentially. It is also shown in \cite{gl} that the Feynman integrals grow with the degree as
\begin{equation}
\left| c_\Gamma (M) \right| \sim C^n_M,
\end{equation}
where $C_M$ is a constant that depends on the three-manifold under consideration. In QFT terms this means that Feynman integrals grow at most exponentially, i.e. that there are no renormalons (since in a renormalon diagram with $n$ loops, the Feynman integral diverges itself factorially, as $n!$).
\begin{example} \label{cs-example} {\it Chern--Simons theory on Seifert spheres}. The general prediction of factorial divergence can be verified in detail for CS theory on Seifert homology spheres. A Seifert homology sphere is specified by
$r$ pairs of coprime integers $(p_j, q_j)$, $j=1, \cdots, r$, and is denoted by
\begin{equation}
M=X\left( {p_1 \over q_1}, \cdots, {p_r \over q_r}\right).
\end{equation}
One also defines
\begin{equation}
P =\prod_{s=1}^r p_s, \qquad H=P \sum_{s=1}^r {q_s \over p_s}.
\end{equation}
$H$ is the order of the first homology group $H_1(M, {\mathbb Z})$.
When $r=1$, Seifert spaces are just lens spaces,
\begin{equation}
L(p,q)=X(q/p).
\end{equation}
The partition function of CS theory on a Seifert space can be written as a matrix integral. This was shown for $G=SU(2)$ by Lawrence and Rozansky \cite{lr},
and it was extended to arbitrary gauge groups in \cite{mm}. There are two types of non-trivial flat connections: the reducible ones, and the irreducible ones.
In a Seifert space and when $G$ is a simply-laced group,
reducible flat connections are labelled by elements
\begin{equation}
t \in \Lambda_r/H \Lambda_r,
\end{equation}
where $\Lambda_r$ is the root lattice. The contribution of such a connection to the partition function is given by (up to an overall normalization, see \cite{mm} for the details)
\begin{equation}
\label{betain}
Z(M) \propto \int {\rm d} \lambda \, {\rm e}^{ -{\lambda^2/2 \hat g_s } - k t\cdot \lambda}{
\prod_{s=1}^r \prod_{\alpha>0} 2 \sinh {\lambda\cdot \alpha \over
2 p_s} \over \prod_{\alpha>0}
\Bigl( 2 \sinh {\lambda\cdot \alpha \over
2} \Bigr)^{r-2}},
\end{equation}
where
\begin{equation}
\label{hatg}
\hat g_s ={P\over H} g_s,
\end{equation}
$\lambda$ belongs to the weight lattice $\Lambda_w$, $\alpha>0$ are the positive roots, and the products are computed with the standard Cartan--Killing form. In the case of
$U(N)$ we have
\begin{equation}
t \in {\mathbb Z}^N/ H {\mathbb Z}^N,
\end{equation}
and we write
\begin{equation}
t=\sum_{i=1}^N t_i e_i, \qquad \lambda =\sum_{i=1}^N \lambda_i e_i, \qquad \{ \alpha\}=\{ e_i -e_j\}_{1\le i<j\le N},
\end{equation}
where $e_i$ is the orthonormal basis of the weight lattice, and $0\le t_i \le H-1$. We then find,
\begin{equation}
\label{unbetain}
Z(M)\propto \int \prod_{i=1}^N {\rm d} \lambda_i \, {\rm e}^{ -{1\over 2 \hat g_s} \sum_{i=1}^N \lambda_i^2- k \sum_{i=1}^N t_i \lambda_i}
{\prod_{s=1}^r \prod_{i<j} 2 \sinh {\lambda_i-\lambda_j \over
2 p_s} \over \prod_{i<j}
\Bigl( 2 \sinh { \lambda_i-\lambda_j \over
2} \Bigr)^{r-2}}.
\end{equation}
The integration contour in (\ref{betain}), (\ref{unbetain}) is chosen in such a way that the Gaussian integral
converges.
The case of $SU(2)$ is particularly simple. Up to an overall constant, the contribution of the trivial connection to the partition function is just an integral,
\begin{equation}
Z\propto \int {\rm d} \lambda \, {\rm e}^{ -{\lambda^2/4 \hat g_s } } f(\lambda; p_j),
\qquad f(\lambda;p_j)=
\left( 2 \sinh { \lambda \over
2} \right)^{2-r} \prod_{s=1}^r \left( 2 \sinh {\lambda \over
2 p_s} \right).
\end{equation}
This can be expanded in power series in $g_s$,
\begin{equation}
Z \propto \sum_{n=0}^{\infty} a_n g_s^n, \qquad a_n ={f^{(2n)}(0)\over n!},
\end{equation}
and one finds \cite{lr}
\begin{equation}
a_n \sim \left( -{P \over \pi^2 H}\right)^n n!,
\end{equation}
i.e. we have a factorial divergence, as expected. The growth is controlled by
\begin{equation}
A =-{\pi^2 H \over P}.
\end{equation}
We would expect this quantity to be the action of a non-trivial saddle-point of the theory, and indeed this is the action of an irreducible flat connection on the Seifert manifold.
\end{example}
The results of \cite{gl} about the structure of the CS perturbative series rely on a mathematical construction for this series
called the LMO invariant \cite{lmo}, which has been
much studied in recent years. The structure of the general trans-series, which correspond to the perturbative expansion around a non-trivial instanton solution, is less understood, and no mathematical construction has been proposed so far. Questions on classical asymptotics and Borel summability in CS theory have started to be addressed only recently, see \cite{garoufalidis,wittenanalytic} for some results and/or conjectures.
\subsection{The $1/N$ expansion}
The problem of non-perturbative effects and asymptotics becomes much more interesting when we look at gauge theories in the $1/N$ expansion \cite{thooft}. In this expansion, the free energy and correlation functions of the gauge theory are expanded in powers of $N$ or of the coupling constant $g_s$, but keeping the 't Hooft parameter
\begin{equation}
t=g_s N
\end{equation}
fixed. For example, the expansion of the free energy around the trivial connection (i.e. what we have called the perturbative series) is re-organized as
\begin{equation}
\label{genusex}
F=\sum_{g=0}^{\infty} F_g(t) g_s^{2g-2},
\end{equation}
where $F_g(t)$ is a sum over {\it double-line graphs} or {\it fatgraphs} of genus $g$. In this reorganization of the theory, the dominant contribution comes from the genus zero or planar
diagrams.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=.8cm]{edgepropa.pdf}
\end{center}
\caption{Thickening an edge.}
\label{edgepropa}
\end{figure}
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=3cm]{thickening.pdf}
\end{center}
\caption{Thickening a marked vertex.}
\label{resolution}
\end{figure}
In the case of CS theory, the structure of the $1/N$ expansion can be made very explicit, as follows. Consider a graph
$\Gamma$ in ${\cal A}^{(c)}_n(\emptyset)$, and apply the {\it thickening rules} depicted in \figref{edgepropa} and \figref{resolution}.
The thickening rules can be regarded as a map that associates to each diagram $\Gamma$ a formal linear combination of {\it fatgraphs} $\Gamma_{g,h}$,
which are Riemann surfaces with boundaries and are classified topologically by their genus $g$ and number of boundaries $h$:
\begin{equation}
\Gamma \rightarrow \sum_{g,h} p_{g,h}(\Gamma) \Gamma_{g,h}.
\end{equation}
It is easy to see that the weight system of $U(N)$ can be written in terms of fatgraphs \cite{cvitanovic,bar-natan},
\begin{equation}
W_{{\rm u}(N)}(\Gamma)=\sum_{g,h} p_{g,h}(\Gamma) N^h.
\end{equation}
An example is shown in \figref{thetares}. One then finds the following expression for the free energy around the trivial connection:
\begin{equation}
F(M, {\rm u}(N), g_s) = \sum_{g=0}^{\infty} \sum_{\Gamma_{g,h}} c_\Gamma (M) p_{g,h}(\Gamma) N^h g_s^{E(\Gamma)-V(\Gamma)},
\end{equation}
where $E(\Gamma), V(\Gamma)$ are the number of edges and vertices in $\Gamma$ (these topological data do not depend on the fattening of the graph).
If we now use Euler's relation,
\begin{equation}
E(\Gamma)-V(\Gamma)=2g-2+h,
\end{equation}
we see that $F(M, {\rm u}(N), g_s)$ is given by the formal series (\ref{genusex}), where
$F_g(t)$ is defined as a formal infinite sum over all fatgraphs $\Gamma_{g,h}$ with {\it fixed} $g$
\begin{equation}
\label{aght}
F_g(t)=\sum_{h\ge 0} a_{g,h} t^h, \qquad a_{g,h}=\sum_{\Gamma_{g,h}} c_{\Gamma}(M) p_{g,h}(\Gamma).
\end{equation}
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=3cm]{thetares.pdf}
\end{center}
\caption{Fatgraphs obtained from the theta diagram.}
\label{thetares}
\end{figure}
As is well-known (see for example the classic review in \cite{coleman}) the $1/N$ expansion described above can be implemented in any $U(N)$
gauge theory where the fields transform in the adjoint representation of $U(N)$, and it can be applied to any gauge-invariant observable of the
theory (when one expands around the trivial connection).
The structure of the free energy as a double power series,
see (\ref{genusex}) and (\ref{aght}), which we have written above based on the analysis of CS theory, can be easily seen to hold in any theory with fields in the adjoint of $U(N)$. The $1/N$ expansion is particularly clean in theories where the coupling constant $g_s$ does not run, i.e. in conformal field theories and topological field theories.
The first question that we have to ask in the search for non-perturbative effects is: what is the nature of the formal power series appearing in the theory, like for example in the series defining the free energy of the theory expanded around the trivial connection? In the case of the $1/N$ expansion, since there are two parameters, we have two different questions to ask:
\begin{enumerate}
\item What is the nature of the formal power series in $t$ appearing in (\ref{aght}), defining $F_g(t)$?
\item For a fixed $t$, what is the nature of the power series in $g_s$ appearing in (\ref{genusex})?
\end{enumerate}
The answer to these questions is the following: in theories with no renormalons, the functions $F_g(t)$ are analytic
at the origin, i.e. the power series (\ref{aght}) have a finite radius of convergence which, moreover,
is common to all of the $F_g(t)$. However, for fixed $t$, the functions $F_g(t)$ grow like
\begin{equation}
F_g(t) \sim (2g)!
\end{equation}
and the series (\ref{genusex}) diverges factorially.
The analyticity of the $1/N$ expansion at fixed genus was first noticed in \cite{knn} and analyzed in
some models by 't Hooft \cite{thooft}. It can be proved in detail in simple $U(N)$ gauge theories, such
as matrix models (see \cite{gp} for a recent study) and CS theory \cite{glm}. The factorial growth of the $1/N$ expansion was pointed out, in a slightly different context, in \cite{shenker}.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=3.5cm]{petals.pdf}
\end{center}
\caption{Counting flowers with $n$ petals.}
\label{petals}
\end{figure}
The basic reason for the analyticity of the $F_g(t)$ is that the number of fatgraphs of {\it fixed} genus grows only {\it exponentially}, and not factorially. A nice example of
this contrasting behaviour is the counting of flower-like graphs. The number of flowers made out of $n$ ``thin petals", like the one shown on the left in \figref{petals}, is given by
the number of possible contractions in a vertex with $2n$ legs, and equals $(2n-1)!!$, which grows factorially for $n\gg 1$. In contrast, the number of ``genus zero" flowers with $n$ ``thick petals," like the one shown on the right in \figref{petals}, is given by the Catalan number (see for example \cite{difrancesco})
\begin{equation}
C_n ={(2n)! \over (n+1)! n!},
\end{equation}
which at large $n$ grows only exponentially, like $4^n$.
We will now give an argument for the analyticity of the free energy in theories without renormalons, following \cite{glm}.
Let us come back to the formal power series appearing in (\ref{aght}). It is easy to see that
\begin{equation}
p_{g,h}(\Gamma) \le C_p^{2g-2+h},
\end{equation}
where $C_p$ is a constant. For example, in a theory with a purely cubic interaction, like CS theory,
each vertex gives two resolutions (see \figref{resolution}), and the maximum number of terms is $2^V$. Since in a theory with a cubic interaction we have
\begin{equation}
3V=2 E,
\end{equation}
we deduce
\begin{equation}
p_{g,h}(\Gamma) \le 2^V=4^{E-V}=4^{2g-2+h}.
\end{equation}
In Yang--Mills theory there are also quartic vertices, which from the point of view of this counting can be regarded as two cubic vertices joined by an edge, leading to a similar estimate. The next step is to analyze the Feynman integrals, $c_\Gamma$. If the theory has renormalons, they can grow factorially with the number of vertices. But in a theory without renormalons they grow only exponentially in the number of vertices, and we can write
\begin{equation}
\label{norenormalon}
|c_\Gamma|\sim C_F^{2g-2+h},
\end{equation}
where $C_F$ is another constant. This has been shown to be the case for a large class of diagrams in
QM \cite{bender}, and it has been proved in CS theory, by using the formulation in terms of the LMO invariant \cite{gl}. We then have,
\begin{equation}
a_{g,h} \sim (C_p C_F)^{2g-2+h} N_{g,h},
\end{equation}
where $N_{g,h}$ is the number of double-line diagrams with genus $g$ and $h$ holes, counted with the appropriate weight.
For example, if we normalize all vertices of degree $p$ with a factor $1/p!$, the weight of a diagram $\Gamma_{g,h}$ is given by
\begin{equation}
{1\over \left| {\rm Aut}(\Gamma_{g,h}) \right|},
\end{equation}
i.e. the inverse of the order of its automorphism group, see \cite{difrancesco}.
The counting of fatgraphs (weighted by their automorphism group, as above) has been developed very much both in
combinatorics and in mathematical physics. The main result we have in this respect is that
\begin{equation}
N_{g,h} \sim C_D^V C_G^g (2g)!,
\end{equation}
see for example \cite{glm}. We conclude that
\begin{equation}
a_{g,h}\sim (2g)! C_1^g C_2^h.
\end{equation}
Therefore, for {\it fixed genus}, and assuming the growth (\ref{norenormalon}), the functions $F_g(t)$ defined by the power series (\ref{aght}) are analytic at $t=0$ with a finite radius of convergence $\rho$ common to all $g$. This is the analyticity result
we wanted to establish. Generically $\rho<\infty$, and there is typically a singularity $t_c$ in the $t$-plane somewhere in the circle of radius $\rho$,
\begin{equation}
\label{singrho}
|t_c|=\rho.
\end{equation}
This argument shows as well that {\it for fixed $t$} (and inside the common domain of convergence) the sequence $F_g(t)$ will diverge like $(2g)!$. This was our second claim above.
The analyticity of the genus $g$ free energies $F_g(t)$ suggests that $t$ should be regarded naturally as an arbitrary,
{\it complex} variable. In the same way, $g_s$ should be also thought of as a complex variable, although in the original gauge theory it can only take special
values: in Yang--Mills theory $g_s$ is the square of the Yang--Mills coupling constant
$g^2_{\rm YM}$ and it is real and positive, while in CS theory it is of the form (\ref{gscs}). The complexification of these variables is natural in
the context of some large $N$ dualities. For example, in dualities between large $N$ CS theory and topological string theory \cite{gv}, the 't Hooft parameter is identified with
a {\it complexified} K\"ahler parameter of a Calabi--Yau manifold. We will then take the point of view that all gauge theory parameters belong to a complex {\it moduli space}.
\subsection{Large $N$ instantons}
We have seen that, in theories without renormalons,
the power series defining free energies $F_g(t)$ at {\it fixed} genus are analytic at $t=0$. Therefore, there is in principle no indication of non-perturbative
effects to be taken into account. Moreover, in many interesting QFTs, one can perform an analytic continuation of the functions $F_g(t)$
to a region in the complex plane which includes all physical values of the
't Hooft coupling, providing in this way a complete description of the theory order by order in the $1/N$ expansion.
This procedure is sometimes called {\it weak-strong coupling interpolation}, since one starts with a function defined at weak 't Hooft coupling
(i.e. in a neighbourhood of $t=0$) and ends up with a function defined for some angular region in the complex plane where $|t|\gg 1$.
We will see below an example of this analytic continuation, concerning the free energy of ABJM theory.
However, as we have also seen, the price to pay for the analyticity of $F_g(t)$ for fixed $g$ is that,
at fixed $t$, the sequence of free energies grows doubly-factorially with the genus. More precisely,
one has the growth
\begin{equation}
\label{largeg}
F_g(t) \sim (2g)! (A(t))^{-2g}, \qquad g \gg1,
\end{equation}
where $A(t)$ is a function of $t$. This behavior has been found in many simple models of the $1/N$ expansion,
like matrix models (and their double-scaling limits), and in some supersymmetric gauge theories.
As we have seen, in theories without renormalons, the standard factorial growth of the perturbative expansion is typically
related to the existence of instantons in the theory. It is then natural to suspect that the growth (\ref{largeg}) of the
$1/N$ expansion should be also due to instanton-like objects which we will call {\it large $N$ instantons}, and that $A(t)$ is the action of such an object.
What is a large $N$ instanton? In general, large $N$ instantons are built upon classical instantons.
To see this, consider a gauge theory with coupling constant $g_s$ and 't Hooft parameter $t$, as well as an instanton
solution whose action (including the coupling) is given by
\begin{equation}
S_c={A \over g_s}.
\end{equation}
We will also assume that $A$ is of order one at large $N$:
\begin{equation}
A \sim {\cal O}(1).
\end{equation}
For example, in Yang--Mills theory the usual instantons with low instanton number satisfy this property. This is due to the fact we can build an instanton
by using just an $SU(2)$ subgroup of $U(N)$. This is also the case in matrix models, where, as we will see in detail, instantons are obtained by eigenvalue
tunneling of one eigenvalue out of $N$. There are instanton configurations whose action is of order $N$ (``giant instantons"), but we will not consider them here.
Let us now evaluate the free energy of a gauge theory around such an instanton configuration, in perturbation theory. The one-loop fluctuations give a term
with the generic form (at large $N$)
\begin{equation}
\left( {c_0 \over g_s}\right)^{c_1 N},
\end{equation}
where $c_1 N$ is the number of zero modes, or collective coordinates of the instanton, at large $N$. This factor comes from the canonical normalization of the
modes in the path integral, since we can always normalize the fields in such a way that the action has an overall power of $1/g_s$. On top
of the classical action and the one-loop fluctuations, at large $N$ we have to consider as well all vacuum, connected planar
diagrams (at all loops) in the background of the classical instanton configuration. To see how these appear,
let us focus for simplicity on an interaction given by a cubic vertex, as in \figref{resolution}. Let us consider fluctuations around
the instanton solution $\overline{{\cal A}}$
\begin{equation}
{\cal A}=\overline{{\cal A}} +{\cal A}'.
\end{equation}
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=3cm]{instvertex.pdf} \qquad \qquad \includegraphics[height=3.5cm]{tinst.pdf}
\end{center}
\caption{The instanton vertex (\ref{ivertex}) (left) and a planar diagram contributing to the large $N$ instanton action $A(t)$ a term of order $t^3$ (right).}
\label{instvertex}
\end{figure}
The action for the fluctuations will include a vertex of the form
\begin{equation}
\label{ivertex}
\sum_{i,j,k} (\overline{{\cal A}}_\mu)^i_{~j} \, ({\cal A}_\nu)^j_{~k} \, ({\cal A}_\rho)^k_{~i}
\end{equation}
and involving the instanton background. We can represent this vertex in the double-line notation as in \figref{instvertex}, where the red line ending on the blob
corresponds to the
instanton background. It gives a factor
of $g_s$, but only the interior line gives a factor of $N$ after tracing over. A simple example of a diagram
contributing to the instanton action is the one depicted on the r.h.s. of \figref{instvertex}. The inner closed lines give a factor of $N^3$,
and the diagram is proportional to
\begin{equation}
{\rm Tr}\bigl( {\overline{{\cal A}}}^3 \bigr) N^3 g_s^2 = {1\over g_s} t^3 {\rm Tr}\bigl( {\overline{{\cal A}}}^3 \bigr),
\end{equation}
since there are nine edges $E=9$ and seven vertices $V=7$, so the power of $g_s^{E-V}$ is two. Therefore, this diagram gives a correction of order $t^3$.
We conclude that, at large $N$, the contribution of an instanton to the free energy is of the form
\begin{equation}
\exp\left( -{A(t) \over g_s} \right),
\end{equation}
where
\begin{equation}
\label{exlna}
A(t)= A - c_1 t \log \left( {c \over t} \right)+ {\cal O}(t).
\end{equation}
The first term in this equation comes from the classical action of the instanton. The second term, which is logarithmic in $t$, incorporates the one-loop correction.
Higher loop corrections lead to a series in $t$.
We then see that, from the point of view of the $1/N$ expansion,
the instanton action is promoted to a non-trivial function $A(t)$ of the 't Hooft parameter, which we call the {\it large $N$ instanton action}.
The calculation of large $N$ instanton actions in realistic theories is of course difficult, since we have to sum up an infinite number of planar diagrams (in the same
way that calculating the genus zero free energy involves adding up an infinite number of diagrams, at all loops).
An alternative, more general way to think about large $N$ instantons is in terms of large $N$ effective actions.
It is believed that gauge theories at large $N$ can be reformulated
in terms of a ``large $N$ effective action" with coupling constant (or $\hbar$ constant) equal to $1/N$, and involving a field
sometimes called the ``master field" \cite{wittenmaster}. In this effective theory, correlation functions
at large $N$ are obtained simply by solving the classical equations of motion of the effective action in the presence of sources.
A large $N$ instanton is an instanton solution of this large $N$ effective theory, i.e. a saddle point of the Euclidean version of the theory, with finite action.
This is in general different from the usual instanton configurations, which are saddle points of the {\it classical} Euclidean action.
However, as we have seen, large $N$ instantons can often be thought of as deformations of the classical instantons,
where the deformation parameter is the 't Hooft parameter: as it is manifest in (\ref{exlna}), when $t\rightarrow 0$ we recover the action of the ``classical" gauge theory instanton.
Explicit examples of large $N$ instantons were obtained in the ${\mathbb C}{\mathbb P}^N$ model in
\cite{affleck,munster}, as deformations of classical instantons. A particularly beautiful example is the large $N$
instanton of two-dimensional Yang--Mills theory obtained in \cite{gm}. In the next section we will see
an example of a large $N$ instanton in a simple toy model, namely matrix quantum mechanics.
Large $N$ instantons play an important r\^ole in the structure of the $1/N$ expansion. First of all,
in the calculation of physical quantities in a large $N$ theory, we expect to have a trans-series structure
with a perturbative sector around the trivial instanton sector, and then a series of sectors corresponding to
$1/N$ expansions around large $N$ instantons. The instanton sectors are weighted by
\begin{equation}
{\rm e}^{-A(t)/g_s}.
\end{equation}
As it will be obvious in examples, $A(t)$, the large $N$ instanton action, is in general a non-trivial function of the 't Hooft parameter $t$.
As expected from the examples developed in these lectures, this is the same quantity appearing in (\ref{largeg}) and controlling the subleading large $g$ asymptotics.
The second important property of large $N$ instantons is their dynamical r\^ole in triggering phase transitions.
If ${\rm Re}(A(t)/g_s)>0$, large $N$ instantons are suppressed exponentially at large $N$ (or small $g_s$). This might lead one to think
that ``instantons are suppressed at large $N$," but as Neuberger pointed out in \cite{neuberger},
this is not necessarily the case. It might happen for example that $A(t)$
vanishes at a particular value of $t$, and in this case the contribution of instantons becomes as important as the perturbative contributions.
The value of the 't Hooft parameter for which $A(t)$ vanishes signals very often a {\it large $N$ phase transition}, or a critical point, in the theory.
This behavior is nothing but the large $N$ version of the jump in the asymptotics occurring along
an anti-Stokes line. It turns out that the critical value of the 't Hooft parameter is also, in many cases, the first singularity $t_c$ in the $t$-plane which
we found in (\ref{singrho}). One of the first examples of such a transition was found in \cite{gw,wadia,wadia2}, and it was argued in \cite{neuberger}
that this should be due to the vanishing of the instanton action, see \cite{mmnp} for a detailed verification for the model analyzed in \cite{gw,wadia}.
\begin{remark} {\it Large $N$ instantons as D-branes}. Notice that the diagrams contributing to the large $N$ instanton action
are similar to the diagrams involving an external boundary, coming from a Wilson loop for example. This means that, if the large $N$ theory has a string dual,
the large $N$ instanton should be associated to a hole in the worldsheet --in other words, to a D-brane. The relation between large $N$ instantons and D-branes
in string theory goes back to the observation by Shenker \cite{shenker} that the large order behavior (\ref{largeg}) should be typical of genus expansions
in string theory. This observation was based on the duality between doubly-scaled large $N$ matrix models (which also display this $(2g)!$ growth)
and noncritical strings. According to Shenker, however, this growth should be a universal feature of string perturbation theory. The non-perturbative effects associated
to this stringy behavior were later on identified with D-brane and membrane effects in \cite{polchinski,bbs}.
\end{remark}
\begin{example} {\it A large $N$ instanton in CS theory}. Let us consider $U(N)$ CS theory on the lens space $L(2,1)$, and the instanton (i.e. the non-trivial
flat connection) characterized by the splitting
\begin{equation}
(N-1, 1).
\end{equation}
The path integral around this instanton configuration is weighted by the exponentially small factor ${\rm e}^{-A/\hat g_s}$,
where $\hat g_s=2 g_s$ (see (\ref{hatg})), and one finds from (\ref{csia}) that
\begin{equation}
A= {\pi^2 \over 2}.
\end{equation}
Using the matrix model representation of the partition function given in (\ref{unbetain}), it is possible to calculate the large $N$ instanton action built on this classical instanton \cite{mpp}. The result is $A(t)/\hat g_s$, where
\begin{equation}
\label{lensaction}
A(t) =2 \, {\rm Li}_2({\rm e}^{-t/2}) -2\, {\rm Li}_2(-{\rm e}^{-t/2})= {\pi^2 \over 2} -t \log \left( {4 {\rm e} \over t}\right) +{\cal O}(t^3),
\end{equation}
which has indeed the expected structure (\ref{exlna}).
\end{example}
In general, it is difficult to obtain explicit expressions for the $1/N$ expansion around large $N$ instanton configurations, hence our understanding of non-perturbative
effects in general large $N$ theories is limited. In the next section we will make a detailed study on large $N$ instantons in matrix models, where one has a lot of analytic control and many
results are available. We will now present another instructive solvable example, namely large $N$ instantons in Matrix Quantum Mechanics.
\subsection{Large $N$ instantons in Matrix Quantum Mechanics}
In order to illustrate the general considerations on $1/N$ expansions and large $N$ instantons explained above, we will now discuss Matrix Quantum Mechanics (MQM), introduced and first studied in \cite{bipz}. This is a quantum-mechanical model where the degrees of freedom are the entries of a Hermitian $N\times N$ matrix $M$
and the Euclidean Lagrangian is given by
\begin{equation}
\label{LagM}
L_M ={\rm Tr} \Bigl[ {1\over 2}\dot M^2 + V(M) \Bigr],
\end{equation}
where $V(M)$ is a polynomial in $M$. Notice that this problem has a $U(N)$ symmetry
\begin{equation}
M \rightarrow U M U^{\dagger}
\end{equation}
where $U$ is a constant unitary matrix, and it promotes the standard one-dimensional QM problem to a problem where fields are in the adjoint
representation of a $U(N)$ symmetry group and can then be studied in the $1/N$ expansion. We will
assume that the potential $V(M)$ is of the form
%
\begin{equation}
V(M)={1\over 2} M^2 + V_{\rm int}(M)
\end{equation}
%
where $V_{\rm int}(M)$ is the interaction term.
The Feynman rules
are the same as in the case of QM, with the only difference that we will
now have ``group factors" due to the fact that $M$ is matrix valued.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=5cm]{mqmf.pdf}
\end{center}
\caption{Feynman rules for matrix quantum mechanics.}
\label{mqmf}
\end{figure}
The propagator of MQM is
%
\begin{equation}
{{\rm e}^{- |\tau|} \over
2} \, \delta_{ik} \delta_{jl},
\end{equation}
%
and for a theory with a quartic interaction
%
\begin{equation}
\label{quarticg}
V_{\rm int}(M)={t \over 4 N} M^4
\end{equation}
%
the Feynman rules are illustrated in \figref{mqmf}. The factor of $N$ in (\ref{quarticg})
is introduced in order to have a standard large $N$ limit, as we will see in more detail later.
One can use these rules to compute the perturbation series of the ground state
energy of MQM, which is obtained by considering connected bubble diagrams, as in conventional QM. Each Feynman diagram leads to a group factor which
depends on $N$, i.e. each conventional Feynman
diagram gives various fatgraphs that can be classified according to their topology.
A fatgraph with $V$ vertices and $h$ boundaries will have a factor
\begin{equation}
t^V N^{h-V} =t^V N^{2-2g},
\end{equation}
since the number of edges is twice the number of vertices, $E=2V$ and
\begin{equation}
h+V-E=h-V.
\end{equation}
Planar diagrams, as usual, are proportional to $N^2$, and the ground state energy has the structure
%
\begin{equation}
E(t, N)=\sum_{g=0}^\infty N^{2-2g} {\cal E}_g(t).
\end{equation}
%
The first few terms in the expansion of ${\cal E}_0(t)$ can be easily computed in perturbation theory,
\begin{equation}
\label{planarquarticg}
{\cal E}_0(t)={1\over 2} +{1\over 8} t -{17\over 256} t^2 +{75\over 1024}t^3+\cdots
\end{equation}
%
As first found in \cite{bipz}, the planar ground state energy in MQM
can be obtained {\it exactly} by using
a free fermion formulation. This exact result {\it resums} in closed form all the planar
diagrams of MQM contributing to the ground state energy. This goes as follows.
After quantization of the system we obtain a Hamiltonian
operator
\begin{equation}
H={\rm Tr} \Bigl[ -{1\over 2}{\partial^2 \over \partial M^2} + V(M) \Bigr],
\end{equation}
where
\begin{equation}
\label{mdo}
{\rm Tr} {\partial^2 \over \partial M^2}=\sum_{ab}
{\partial^2 \over \partial M_{ab} \partial M_{ba}}.
\end{equation}
In order to study the spectrum of this Hamiltonian, it is useful to change variables
\begin{equation}
\label{Mdiag}
M=U \Lambda U^{\dagger},
\end{equation}
where
\begin{equation}
\Lambda ={\rm diag}(\lambda_1, \lambda_2, \cdots, \lambda_N)
\end{equation}
is a diagonal matrix. It can be shown that, when acting on singlet states
(i.e., states that are invariant under the full $U(N)$ group), the differential
operator (\ref{mdo}) has the form,
\begin{equation}
-{1\over 2} {\rm Tr} {\partial^2 \over \partial M^2} = -{1\over 2}{1\over \Delta(\lambda)} \sum_{a=1}^N \Bigl(
{\partial \over \partial \lambda_a}\Bigr)^2 \Delta(\lambda).
\end{equation}
Due to the residual Weyl symmetry, a singlet state is represented by a symmetric function of the $N$ eigenvalues,
\begin{equation}
\Psi(\lambda_i).
\end{equation}
We now introduce a completely
{\it antisymmetric} wavefunction
\begin{equation}
\label{fwf}
\Phi(\lambda) =\Delta(\lambda) \Psi(\lambda),
\end{equation}
where
\begin{equation}
\label{vander}
\Delta(\lambda)=\prod_{a<b} (\lambda_a -\lambda_b)
\end{equation}
is the Vandermonde determinant. It is now easy to see that the original problem of calculating the energies for singlet states becomes the problem of calculating the energy of $N$ non-interacting fermions (since the function (\ref{fwf}) is completely antisymmetric) in an external potential $V(\lambda)$. We will now assume that this potential $V(\lambda)$ has good large $N$ scaling properties. More precisely, we will assume
that the $N$-dependence of the potential $V(\lambda)$ is such that
\begin{equation}
V(\lambda) = N v\left( {\lambda \over {\sqrt{N}}} \right),
\end{equation}
where $v(\lambda)$ does not contain $N$. For example, the quartic potential considered above,
\begin{equation}
V(\lambda) ={1\over 2} \lambda^2 + {t \over 4 N} \lambda^4,
\end{equation}
has good scaling properties. This can be interpreted as saying that
\begin{equation}
\label{mqmgs}
t= N g_s
\end{equation}
is the 't Hooft parameter of the model, which is kept fixed as $N\rightarrow \infty$.
After rescaling $\lambda \rightarrow {\sqrt{N}} \lambda$, we find that the one-body fermion problem reduces to
\begin{equation}
\label{hNeq}
\biggl\{ -{\hbar^2\over 2N^2} {{\rm d}^2 \over {\rm d} \lambda^2} +v(\lambda) \biggr\}\phi_n(\lambda)=e_n \phi_n(\lambda)
\end{equation}
where
\begin{equation}
e_n={1\over N} E_n
\end{equation}
and $E_n$ are the energy levels in the original one-body problem. The ground state energy is given by
\begin{equation}
\label{sumfermi}
E(t,N)=\sum_{n=1}^N E_n=N \sum_{n=1}^N e_n=N^2 {\cal E}_0(t) +\cdots
\end{equation}
To calculate ${\cal E}_0$, we note that the quantum
effects in (\ref{hNeq}) are controlled by $\hbar/N$. Therefore, large $N$ is equivalent to $\hbar$ small, and in the
large $N$ limit we can use the semiclassical or WKB approximation.
In particular, we can use the Bohr--Sommerfeld formula to find the energy spectrum at leading order in $\hbar/N$.
We will write this semiclassical quantization condition as
\begin{equation}
\label{leading}
N J(e_n)=n-{1\over 2}, \qquad n \ge 1,
\end{equation}
where
\begin{equation}
J(e)= {1\over \pi \hbar } \int_{\lambda_1(e)}^{\lambda_2(e)} {\rm d}
\lambda{\sqrt {2(e -v(\lambda))}}
\end{equation}
and $\lambda_{1,2}(e)$ are the turning points of the potential. If we denote
\begin{equation}
\label{xivar}
\xi={n-{1\over 2} \over N},
\end{equation}
we see that (\ref{leading}) defines implicitly a function $e(\xi)$ through
\begin{equation}
J\left( e(\xi)\right)=\xi.
\end{equation}
At large $N$, the spectrum becomes denser and denser, the discrete variable $\xi$ becomes
a continuous one $\xi \in [0,1]$, and the sum in (\ref{sumfermi}) becomes an integral through the rule
\begin{equation}
\label{sumrule}
\sum_{n=1}^N \rightarrow N \int_0^1 {\rm d} \xi.
\end{equation}
One then finds,
\begin{equation}
{\cal E}_0 = \int_0^1 {\rm d} \xi e (\xi).
\end{equation}
We also define the Fermi energy of the system by the condition
\begin{equation}
\label{fermicond}
J(e_F)=1.
\end{equation}
After some simple manipulations, one finds the expression
\begin{equation}
\label{finaleo}
{\cal E}_0= e_F-{1\over 3 \pi \hbar} \int_{\lambda_1(e_F)}^{\lambda_2(e_F)} {\rm d}
\lambda \Bigl[2(e_F-v(\lambda))\Bigr]^{3/2}.
\end{equation}
In the case of the quartic potential, the ground state energy can be explicitly computed as a function of the 't Hooft parameter. We first obtain the Fermi energy, which is defined by (\ref{fermicond}) (in this calculation we set $\hbar=1$).
For the quartic potential, the integral $J(e)$ can be easily computed in terms of elliptic functions. Let us denote,
\begin{equation}
\label{abmqm}
a^2=\frac{\sqrt{4 e t+1}-1}{t}, \qquad b^2=\frac{\sqrt{4 e t+1}+1}{t}.
\end{equation}
The turning points of the potential are $\pm a$. We also introduce the elliptic modulus
\begin{equation}
\label{qmodulus}
k^2={a^2 \over a^2 + b^2}.
\end{equation}
We then find
\begin{equation}
\label{iintegral}
J(e)={1\over 3 \pi} (2t)^{1\over 2} (a^2+b^2)^{1\over 2} \Bigl[ b^2 K(k) + (a^2-b^2) E(k)\Bigr],
\end{equation}
where $E(k)$, $K(k)$ are complete elliptic integrals. The condition (\ref{fermicond}) defines the Fermi energy $e_F(t)$ as an
implicit function of the 't Hooft parameter. The planar free energy is given by
\begin{equation}
{\cal E}_0(t)=e_F(t) -{1\over 3 \pi} \left( {t \over 2}\right) ^{3/2} {\cal I}(t, e_F(t)),
\end{equation}
and it involves the integral
\begin{equation}
\begin{aligned}
{\cal I} (t,e)&=\int_{-a}^a {\rm d} u \Bigl[(a^2-u^2)(b^2+u^2)\Bigr]^{3/ 2} \\
& ={2\over 35} {\sqrt{a^2 + b^2}} \Bigl\{ 2(a^2-b^2) (a^4 + 6 a^2 b^2+ b^4) E(k) +b^2 (2 b^4 +9 a^2 b^2 -a^4) K(k)\Bigr\}.
\end{aligned}
\end{equation}
The final result can be easily expanded in powers of $t$, and one finds
\begin{equation}
\label{planarser}
{\cal E}_0(t) ={1\over 2}+{t\over 8} - {17 t^2 \over 256}+{75 t^3\over 1024}-{3563 t^4 \over 32768} +{\cal O}(t^5).
\end{equation}
The first few terms are in perfect agreement with the calculation in planar perturbation theory (\ref{planarquarticg}).
One important remark on this result is that ${\cal E}_0(t)$ is an analytic function of $t$ at $t=0$, in accordance with the general result
explained above. This follows from the explicit expression for
${\cal E}_0$ in terms of elliptic functions. The radius of convergence of the expansion (\ref{planarser}) can be calculated by locating
the position of the nearest singularity $t_c$ in the $t$ plane. This singularity occurs when the modulus (\ref{qmodulus}) becomes $-\infty$, i.e. when
\begin{equation}
e_F (t_c)=-{1\over 4 t_c}.
\end{equation}
This is also the branch point in (\ref{abmqm}). It can be easily checked that this happens when
\begin{equation}
\label{conifold}
t_c=- {2 \sqrt{2} \over 3 \pi}.
\end{equation}
This has a nice interpretation in terms of the
fermion picture. Since $t_c$ is negative, it corresponds to an {\it inverted} quartic potential and to the precise value of the parameter at which the Fermi level reaches the maximum of the potential, see \figref{quarticQM}.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=4.5cm]{quarticQM.pdf}
\end{center}
\caption{The Fermi level $e_F$ in the quartic potential with negative coupling $t<0$. The nearest singularity corresponds to the critical value in which $e_F$ reaches the
maximum of the potential.}
\label{quarticQM}
\end{figure}
We can now try to calculate large $N$ instanton effects in MQM. As in the case of standard QM, we consider an inverted quartic potential,
\begin{equation}
V(\lambda) ={1\over 2} \lambda^2 - {\kappa \over 4 N} \lambda^4.
\end{equation}
In this case, the vacuum at the origin is unstable and we should
expect an instanton configuration mediating vacuum decay. In principle, one should write down an instanton solution with
``small" action and calculate the
path integral around it. This solution can be found by tunneling one single eigenvalue of the matrix $M$, which has an action of order ${\cal O}(1)$, i.e.
we consider the matrix instanton,
\begin{equation}
M_c(t) ={\rm diag}\left(0, \cdots, 0, q_c(t), 0, \cdots, 0\right),
\end{equation}
where $q_c(t)$ is the bounce (\ref{qcsaddle}) with coupling $\lambda=g_s=\kappa/N$. In principle, one could expand the path integral of MQM
around this configuration and compute quantum planar fluctuations to determine the large
$N$ instanton action (this calculation was originally proposed in \cite{neuberger}).
However, the fermion picture, which gives us a compact way of computing the planar ground state energy,
should also give us an efficient way to compute the large $N$ instanton action in a single strike. In this picture, the ground state
is given by a filled Fermi level. As in any Fermi system, tunneling effects will first affect fermions which are near the Fermi surface. An instanton configuration with small action (i.e. of order ${\cal O}(N^0)$) can then be obtained by tunneling a single fermion out of the $N$ particles in the Fermi gas. Since at large $N$ we can use semiclassical methods, the instanton action of such a fermion is
just given by the standard WKB action,
\begin{equation}
\label{instaction}
{A(\kappa) \over g_s} = N \left( 2 \kappa \right)^{1/2} \int_a^b {\rm d} \lambda \, \sqrt{(\lambda^2-a^2)(b^2 - \lambda^2)},
\end{equation}
where $a,\, b$ are the turning points associated to the Fermi energy $e_F$, and they are non-trivial functions
of the 't Hooft parameter $\kappa$. They are defined by the equations (\ref{abmqm}) with $t=-\kappa$ and an extra minus sign for $b^2$. There is an extra factor of $2$ due
to the symmetry of the problem, and the factor of $N$ is due to the fact that the effective
Planck constant in this problem is $1/N$, as we remarked in (\ref{hNeq}) (we are setting $\hbar=1$).
The integral in (\ref{instaction}) can be explicitly computed by using elliptic functions, and the final result
is
\begin{equation}
\label{mqmins}
A(\kappa)={1\over 3} (2\kappa^3)^{1/2} b \Bigl[ (a^2+b^2) E(k)-2 a^2 K(k)\Bigr],
\end{equation}
where the elliptic modulus is now given by
\begin{equation}
k^2={b^2-a^2 \over b^2}.
\end{equation}
The above function has the following expansion around $\kappa=0$,
\begin{equation}
A(\kappa) ={4 \over 3} -\kappa \log\left( {16 \,{\rm e} \over \kappa}\right) +{17\kappa^2 \over 16} + {125 \kappa^3 \over 128} +\cdots.
\end{equation}
This is precisely the expected structure (\ref{exlna}) for a large $N$ instanton action: the leading term is the action for the instanton (\ref{bounceaction})
in the $N=1$ quantum mechanical problem, the $\log$ term is a one-loop factor in disguise, and the rest of the series is a sum of loop corrections
in the background of the ``classical" instanton. An interesting
property of $A(\kappa)$ is that it {\it vanishes} at the critical value
\begin{equation}
\kappa_c=-t_c={2 \sqrt{2} \over 3 \pi},
\end{equation}
which is indeed the singularity (\ref{conifold}) in the complex $t$ plane signaling the convergence radius of the genus
$g$ ground state energies. It corresponds to the critical point at which the Fermi sea reaches the local maximum; at this point the action for tunneling must indeed vanish
since the endpoints in (\ref{instaction}) collide: $a(t_c)=b(t_c)$.
Although we have just computed here the leading order term in the $1/N$ expansion of the ground state energy,
there is a full series of higher genus corrections to this result ${\cal E}_g(t)$, $g\ge 1$, which play the r\^ole of the
genus corrections to the free energy discussed above. One would expect that these corrections display the asymptotic
behavior (\ref{largeg}), where $A(t)$ is the action of the large $N$ instanton calculated in (\ref{mqmins}). This was indeed verified in \cite{mpmqm}.
In \cite{wadia2}, Wadia analyzed a closely related model which shares some properties with the quartic model with negative coupling
considered above. The model studied in \cite{wadia2} is matrix quantum mechanics on a circle, with a cosine potential, and there are two phases separated by a
third order phase transition which occurs when the Fermi level reaches the maximum of the potential. As shown in \cite{neuberger}, the action of the instanton
mediating the tunneling vanishes at the transition point. The model is therefore similar to the one considered above, but due to the compactness of configuration space
there is no instability: at the critical value of the 't Hooft parameter we rather have a reorganization of the Fermi sea.
\sectiono{Non-perturbative effects in matrix models}
\subsection{Matrix models at large $N$: General aspects}
In this subsection we review some basic facts about matrix models. For more detailed expositions and explanations, the reader is encouraged to look at \cite{dfgzj,mmleshouches,eoreview}.
We will consider matrix models for an $N \times N$ Hermitian matrix $M$, with a potential $V(M)$. For the time being
we will assume that this is a polynomial potential,
\begin{equation}
V(\lambda)={1 \over 2}\lambda^2+ \sum_{p\ge 3} {g_p\over p} \lambda^p,
\end{equation}
where the $g_p$ are coupling constants of the model. The partition function is defined by
\begin{equation}\label{matrix}
Z(N, g_s)={1 \over {\rm vol}(U(N))}\int {\rm d} M\, {\rm e}^{-{1\over g_s}{\rm Tr} V(M)},
\end{equation}
where $g_s$ is an additional coupling constant, sometimes referred to as the string coupling constant.
Matrix models have a $U(N)$ ``gauge" symmetry
\begin{equation}
M \rightarrow U M U^{\dagger},
\end{equation}
therefore one can go to the ``diagonal gauge" and write this partition function in terms of the
eigenvalues of $M$, denoted by $\lambda_i$ (this is very similar to the gauge (\ref{Mdiag}) in matrix quantum mechanics).
The resulting $N$-dimensional integral is given by
\begin{equation}
\label{zmm}
Z_{\gamma}(N, g_s)={1\over N!} \int_{\gamma} \prod_{i=1}^N {{\rm d} \lambda_i \over 2\pi} \, \Delta^2(\lambda) \, {\rm e}^{-{1\over g_s} \sum_{i=1}^N V(\lambda_i)},
\end{equation}
where $\Delta(\lambda)$ is the Vandermonde determinant introduced in (\ref{vander}).
Here $\gamma$ is a contour in the complex plane, which we take to be the same for the $N$ eigenvalues,
and which makes the integral convergent.
Let us denote the critical points of $V(\lambda)$ by
\begin{equation}
\lambda^\star_1, \cdots, \lambda^\star_d.
\end{equation}
It is possible \cite{felder} to choose $d$ integration contours $\gamma_k$ in the complex plane,
$k=1, \cdots, d$, going to infinity in directions where $\exp(-V(\lambda)/g_s)$ decays exponentially, and in such a way
that each of them passes through exactly one of the $d$ critical points and is a steepest descent contour. In general, the contours depend on the argument of $g_s$,
but we can always make a change of variables so as to reabsorb this phase in the coefficients of $V(\lambda)$. The original contour $\gamma$ can be written as a
linear combination of such contours,
\begin{equation}
\gamma =\sum_{k=1}^d C_k \gamma_k.
\end{equation}
We can then write
\begin{equation}
\label{sumz}
Z_{\gamma}(N, g_s) =\sum_{N_1 +\cdots +N_d=N} C_1^{N_1} \cdots C_d^{N_d} Z(N_1, \cdots, N_d).
\end{equation}
In this formula,
\begin{equation}
\label{genz}
Z(N_1, \cdots, N_d)={1\over N_1! \cdots N_d!} \int_{\lambda^{(1)}_{i_1} \in \gamma_1} \cdots \int_{\lambda^{(d)}_{i_d} \in \gamma_d}
\prod_{i=1}^N {{\rm d}\lambda_i \over 2 \pi}\, \Delta^2(\lambda) {\rm e}^{-{1\over g_s} \sum_{i=1}^N
V(\lambda_i)},
\end{equation}
and we have split the eigenvalues in $d$ sets,
\begin{equation}
\{ \lambda^{(k)}_{i_k}\}_{i_k=1, \cdots, N_k}, \quad k=1, \cdots, d.
\end{equation}
Each integral (\ref{genz}) defines a possible ``background" or vacuum
of the original matrix model. There are three important remarks to be made about the above expansion:
\begin{enumerate}
\item The backgrounds (\ref{genz}) of the matrix model are in one-to-one correspondence with
gauge symmetry breaking patterns,
\begin{equation}
U(N) \rightarrow U(N_1) \times \cdots \times U(N_d).
\end{equation}
This is similar to the vacuum structure (\ref{lens-split}) of CS theory on lens spaces $L(d,1)$. We will denote these vacua by
\begin{equation}
(N_1, \cdots, N_d).
\end{equation}
\item These sectors appear as an artifact of the saddle-point approximation. The original integral (\ref{zmm}) is perfectly well-defined,
and it is only our choice to treat the integral in this approximation that leads to the appearance of these sectors.
\item The expression (\ref{sumz}) is the analogue in random matrix theory of a {\it trans-series solution}.
\end{enumerate}
A choice of background in the matrix model will define a choice of a perturbative sector. As we will see, the remaining sectors can then be regarded as instanton
sectors w.r.t. the chosen background. A background of the form
\begin{equation}
(N, 0, \cdots, 0)
\end{equation}
is called a {\it one-cut background}, for reasons which will be clear in a moment, while the generic background will be called a {\it multi-cut background}.
Let us now choose a fixed background and let us consider the integral (\ref{genz}) on this background. It can be computed in a
saddle-point expansion at small $g_s$ but keeping fixed the so-called {\it partial 't Hooft couplings}
\begin{equation}
\label{partial-thooft}
t_i=g_s N_i.
\end{equation}
This means that we are doing a large $N$ expansion of the matrix integral. The total 't Hooft parameter is
\begin{equation}
t=g_s N=\sum_{i=1}^d t_i.
\end{equation}
One can use the standard large $N$ counting arguments to see that the structure of the free energy is of the form
\begin{equation}
\label{largeNas}
\log Z(N_1, \cdots, N_d) =\sum_{g=0}^{\infty} g_s^{2g-2} F_g(t_1, \cdots, t_d) .
\end{equation}
In the one-cut case $d=1$ this is the standard $1/N$ counting briefly reviewed in the previous section.
In the multi-cut case one has to do a slightly refined analysis, see the
Appendix in the published version of \cite{bde} for an explicit derivation.
\begin{example} {\it The Gaussian matrix model}. The Gaussian matrix model is defined by the matrix integral (\ref{zmm}) with the potential
\begin{equation}
V(\lambda)={\lambda^2 \over 2}.
\end{equation}
If $g_s>0$, the integration contour is simply $\gamma={\mathbb R}$, the real axis. We will denote the partition function of this model by $Z^{\rm G}(N,g_s)$.
This is one of the few cases in which the matrix integral can be computed exactly at finite $N$,
\begin{equation}
\label{gaussianN}
Z^{\rm G}(N,g_s)= {g_s^{N^2/2} \over (2\pi)^{N/2}}\, G_2(N+1),
\end{equation}
where $G_2(N+1)$ is the Barnes function
\begin{equation}
G_2(N+1)=\prod_{i=0}^{N-1} i!.
\end{equation}
In this case, the large $N$ expansion (\ref{largeNas}) follows from the asymptotics of this function. One finds,
\begin{equation}
\label{gaussiang}
\begin{aligned}
F^{\rm G}_0(t)&={1\over 2} t^2 \Bigl( \log \, t -{3 \over 2} \Bigr), \\
F^{\rm G}_1(t)&=-{1\over 12} \log \, t +{1\over 12} \log \, g_s +\zeta'(-1),\\
F^{\rm G}_g(t)&= {B_{2g} \over
2g (2g-2)} t^{2-2g}, \quad g>1,
\end{aligned}
\end{equation}
where $B_{2g}$ are Bernoulli numbers. Notice that $F^{\rm G}_1(t)$ depends also on $g_s$, but usually this piece (as well as the constant involving the zeta function) is not taken into account.
\end{example}
The genus $g$ free energies $F_g(t_1, \cdots, t_d)$ appearing in (\ref{largeNas}) can be expanded around $t_i=0$. The first terms in this expansion are just the Gaussian
free energies for the different $t_i$'s. Once these terms are subtracted, the resulting quantities
\begin{equation}
\label{g-sub}
F_g(t_1, \cdots, t_d)-\sum_{i=1}^d F^{\rm G}_g(t_i)
\end{equation}
are {\it analytic} at the origin, in agreement with the general arguments put forward in the previous section. We will regard (\ref{largeNas}) as
our perturbative expansion. It is interesting to note that this expansion
is a generalization/deformation of the standard saddle-point expansion of one-dimensional integrals.
Indeed, let us write (\ref{genz}) as
\begin{equation}
{1\over N_1! \cdots N_d!} \int_{\lambda^{(1)}_{i_1} \in \gamma_1} \cdots \int_{\lambda^{(d)}_{i_d} \in \gamma_d}
\prod_{i=1}^N {{\rm d}\lambda_i \over 2 \pi}\, \exp\left\{-{1\over g_s} \left( \sum_{i=1}^N V(\lambda_i)-{t\over N} \sum_{i\not=j}\log \left|\lambda_i -\lambda_j\right| \right) \right\}.
\end{equation}
We want to take a limit where $g_s$ is small and $t$ is fixed, so that $N$ is large. The two terms inside the parenthesis are then of the same order (i.e. ${\cal O}(N)$) and it is
clear that the 't Hooft parameter controls the strength of the Vandermonde interaction. Let us suppose
that $t$ is very small, so that we can neglect this interaction. In this limit (\ref{genz}) factorizes into a product of
standard saddle-point integrals,
\begin{equation}\label{limitZ}
Z(N_1, \cdots, N_d) \to \prod_{i=1}^d \left( f_{\gamma_i}(g_s)\right)^{N_i}, \qquad t \to 0,
\end{equation}
where
\begin{equation}
f_{\gamma_i}(g_s)=\int_{\gamma_i} {{\rm d} \lambda \over 2 \pi} {\rm e}^{-{1\over g_s} V(\lambda)} \approx {\rm e}^{-{1\over g_s} V(\lambda^\star_i)}
\end{equation}
and we approximated this integral by a saddle-point expansion around $\lambda^\star_i$. When $t$ is no longer small,
we have to take into account the Vandermonde determinant. Since this induces a repulsion between eigenvalues,
they will no longer sit at the saddle points of $V(\lambda)$: the $N_k$ eigenvalues in the $k$-th set will sit at an interval or arc ${\cal C}_k$ around the $k$-th
saddle-point. As we will see in a moment, when $N$ is large but the 't Hooft parameters $t_i$ are fixed, these arcs are compact, and the distribution of the eigenvalues on
these arcs is given by a {\it density function} $\rho(\lambda)$, whose support is the union of the intervals. The problem of finding this equilibrium distribution can then be regarded as
a deformation of the saddle-point technique for standard integrals, where the deformation parameters are the 't Hooft parameters $t_i$.
\subsection{The one-cut solution}
The determination of the free energies $F_g(t_i)$, for a given matrix model potential and background,
has a long history, which starts in \cite{bipz} and culminates in \cite{eo}. We will now review some of the relevant results,
which we will need to develop the instanton calculus in matrix models.
To begin with, we will consider as our background a one-cut background. In this case, there is a single
't Hooft parameter and the free energy has a perturbative genus expansion of the form
\begin{equation}\label{oneovern}
F = \sum_{g=0}^{\infty} F_g(t)\, g_s^{2g-2}.
\end{equation}
Another important set of quantities in a matrix model are the connected correlation functions
\begin{equation}\label{wcor}
W_h (p_1, \ldots, p_h) = \left\langle {\rm Tr}\, {1\over p_1-M} \cdots {\rm Tr}\, {1\over p_h-M} \right\rangle_{(\mathrm{c})},
\end{equation}
\noindent
where the subscript $(\mathrm{c})$ means connected. These correlation functions are generating functions for multi--trace correlators of the form
\begin{equation}\label{scor}
W_h (p_1, \ldots, p_h) = \sum_{n_i \ge 1} \frac{1}{p_1^{n_1+1} \cdots p_h^{n_h+1}}\, \left\langle {\rm Tr}\, M^{n_1} \cdots {\rm Tr}\, M^{n_h} \right\rangle_{(\mathrm{c})},
\end{equation}
\noindent
and they have a $g_s$ expansion of the form
\begin{equation}
\label{wgh}
W_h (p_1, \ldots, p_h) = \sum_{g=0}^{\infty} g_s^{2g+h-2} W_{g,h} (p_1, \ldots, p_h).
\end{equation}
At large $N$, the one-cut background is characterized by a density of eigenvalues $\rho(\lambda)$ which
has support on a single, connected interval ${\cal C}=[a,b]$ in the complex plane. This density is completely determined by the condition
that the so-called {\it effective potential} on an eigenvalue,
\begin{equation}\label{veff}
V_{\rm eff}(\lambda) = V(\lambda) - 2t \int {\rm d} \lambda'\, \rho(\lambda') \log |\lambda -\lambda'|,
\end{equation}
\noindent
has to be {\it constant} --at fixed 't~Hooft coupling-- on the interval ${\cal C}$:
\begin{equation}\label{vconst}
V_{\rm eff}(\lambda) = t \xi(t), \qquad \lambda \in {\cal C}.
\end{equation}
\noindent
A quantity which is closely related to the density of eigenvalues is the {\it planar resolvent}, which is nothing but the quantity $W_{0,1}(p)/t$, where $W_{0,1}(p)$ has been introduced in (\ref{wgh}).
The planar resolvent can be computed as
\begin{equation}\label{zeroresint}
\omega_0(p) =\int {\rm d} \lambda \,{\rho (\lambda)\over p -\lambda}.
\end{equation}
It also satisfies the asymptotic condition
\begin{equation}
\label{asymres}
\omega_0(p) \sim {1 \over p}, \qquad p\rightarrow \infty,
\end{equation}
which follows from the normalization of the density function
\begin{equation}
\int_{\cal C} {\rm d} \lambda \, \rho(\lambda)=1.
\end{equation}
Once the resolvent is known, the eigenvalue density follows as
\begin{equation}\label{rhow}
\rho(\lambda) = - {1 \over 2 \pi {\rm i}} \bigl( \omega_0 (\lambda + {\rm i}\epsilon) - \omega_0 (\lambda - {\rm i} \epsilon) \bigr).
\end{equation}
It can be seen that the condition (\ref{vconst}) determines the resolvent as
\begin{equation}
\label{solwo}
\omega_0(p) ={1 \over 2t} \oint_{\cal C} {{\rm d} z \over 2 \pi {\rm i}} { V'(z) \over p-z} \biggl( { (p-a)(p-b)\over
(z -a) (z -b)}\biggr)^{1 \over 2} .
\end{equation}
Here the integration is around a closed contour which encircles the cut ${\cal C}$. By deforming the integration contour, this solution can also be written as,
\begin{equation}
\label{res-sc}
\omega_0(p) ={1\over 2t} \bigl( V'(p) - y(p) \bigr),
\end{equation}
where $y(p)$ is a function on the complex plane which has a branch cut along ${\cal C}$, called the \textit{spectral curve} of the matrix model. In the one-cut case, the spectral curve has the structure
\begin{equation}\label{scurve}
y(p) = M(p) {\sqrt{(p-a)(p-b)}},
\end{equation}
\noindent
where $M(p)$, known as the \textit{moment function}, is given by
\begin{equation}\label{momentf}
M(p) = \oint_{\infty} {{\rm d} z \over 2 \pi {\rm i}}\, {V'(z) \over z-p}\, {1 \over{\sqrt{(z-a)(z-b)}}},
\end{equation}
with the contour of integration being around the point at $\infty$. The endpoints of the cut follow from the asymptotic behavior of the resolvent (\ref{asymres}),
leading to the equations
\begin{equation}\label{endpo}
\begin{aligned}
\oint_{\cal C} {{\rm d} z \over 2\pi {\rm i}}\, {V'(z) \over {\sqrt{(z-a)(z-b)}}}&=0, \\
\oint_{\cal C}{{\rm d} z \over 2\pi {\rm i}}\, {z V'(z) \over {\sqrt{(z-a)(z-b)}}}&=2t.
\end{aligned}
\end{equation}
\noindent
It turns out that, with the exception of the free energies at genus zero and one, and the planar one-point function $W_{0,1}(p)$,
the quantities $F_g(t)$ and $W_{g,h} (p_1, \ldots, p_h)$
can be computed in terms of the spectral curve alone. More precisely, knowledge of the endpoints of the cut, $a$ and $b$, and of the moment function, is all one needs in order to compute them. This was first made clear in \cite{bipz,ajm,ackm} and
later culminated in the geometric formalism of \cite{eynard, ce,eo}. For example, the two--point correlator at genus zero is given by \cite{ajm}
\begin{equation}\label{annmm}
W_{0,2} (p,q) = {1 \over 2 (p-q)^2} \left( {p q - {1\over 2} (p+q) (a+b) + ab \over {\sqrt{(p-a)(p-b)(q-a)(q-b)}}} - 1 \right),
\end{equation}
and only depends on the end-points of the cut. On the other hand, the genus--zero free energy, $F_0(t)$ is given by
\begin{equation}\label{planarf0}
F_0(t) = -{t\over 2} \int_{\mathcal C} {\rm d}\lambda\, \rho (\lambda) V (\lambda) - {1\over 2} t^2 \xi(t).
\end{equation}
The derivatives of the planar free energy can be computed in terms of the effective potential. One finds (see for example \cite{iy})
\begin{equation}\label{fzeroders}
\begin{aligned}
\partial_t F_0 (t) &= - t \xi(t) = - V_{\mathrm{eff}} (b), \\
\partial_t^2 F_0 (t) &= - \partial_t V_{\mathrm{eff}} (b) = 2 \log { b-a \over 4}.
\end{aligned}
\end{equation}
\subsection{The multi-cut solution}
We will now consider the more general (and more difficult) background: the multi-cut solution. In this case the support of the eigenvalue
distribution is a disjoint union of $n$ intervals
\begin{equation}
{\cal C}=\bigcup_{i=1}^n {\cal C}_i, \quad {\cal C}_i= [x_{2i-1}, x_{2i}].
\end{equation}
The density $\rho(\lambda)$ now satisfies the equation,
\begin{equation}
\label{cpot}
V_{\rm eff}(\lambda)=\Gamma_i, \qquad \lambda \in {\cal C}_i,
\end{equation}
where $\Gamma_i$ are constants in each interval ${\cal C}_i$. This generalizes (\ref{vconst}) to the multi-cut case.
The way to implement the multi--cut solution at the level of the planar resolvent is to require $\omega_0(p)$
to have $2n$ branch points. The solution now reads,
\begin{equation}
\label{solwmulti}
\omega_0(p) ={1 \over 2t} \oint_{\cal C} {{\rm d} z \over 2 \pi {\rm i}} { V'(z) \over p-z}
\left( \prod_{k=1}^{2n} { p-x_k\over
z-x_k}\right)^{1 /2}.
\end{equation}
This can be also written as in (\ref{res-sc}), in terms of a spectral curve, which is now hyperelliptic,
\begin{equation}
y(p)=M(p) \prod_{k=1}^{2n} {\sqrt{p-x_k}}.
\end{equation}
Here, $M(p)$ is the multi-cut moment function, which is given by the obvious generalization of (\ref{momentf}) to the multi-cut case.
In order to satisfy the asymptotics (\ref{asymres}) the following conditions must hold:
\begin{equation}
\label{splusone}
\delta_{\ell n}={1\over 2t} \oint_{\cal C} {{\rm d} z \over 2 \pi {\rm i}} {z^{\ell} V'(z)
\over \prod_{k=1}^{2n} (z-x_k)^{1\over 2}}, \qquad \ell=0,1, \cdots, n.
\end{equation}
In contrast to the one-cut case, these are only $n+1$ conditions for the $2n$ variables
$x_k$ representing the endpoints of the cut. The remaining $n-1$ conditions are obtained by fixing the values of the partial 't Hooft parameters:
\begin{equation}
t_i=g_s \int_{{\cal C}_i} {\rm d}\lambda \, \rho(\lambda).
\end{equation}
The planar free energy satisfies in addition the equation,
\begin{equation}
\label{pdF}
{\partial F_0 \over \partial t_i}-{\partial F_0 \over \partial t_{i+1}} =\Gamma_{i+1}-\Gamma_i,
\end{equation}
where $\Gamma_i$ are the quantities appearing in (\ref{cpot}).
\FIGURE{
\includegraphics[height=3cm]{twocuts.pdf}
\caption{A two-cut spectral curve, showing two contours ${\cal C}_{1,2}$ around the cuts where $N_{1,2}$ eigenvalues sit. The ``dual" cycle ${\cal D}_1$ goes from ${\cal C}_2$ to ${\cal C}_1$.}
\label{twocuts}
}
We can write the multi-cut solution in a very elegant way by using contour integrals of the spectral curve.
First, the partial 't Hooft parameters are given by
\begin{equation}
\label{tper2}
t_i=-{1\over 4 \pi {\rm i}} \oint_{{\cal C}_i} y(p){\rm d} p.
\end{equation}
We now introduce dual cycles ${\cal D}_i$, $i=1, \cdots, n-1$, going from the ${\cal C}_{i+1}$ cycle to the ${\cal C}_{i}$ cycle counterclockwise, see \figref{twocuts}. In terms of these, we can write (\ref{pdF}) as
\begin{equation}
\label{dfy}
{\partial F_0 \over \partial t_i}-{\partial F_0 \over \partial t_{i+1}} ={1\over 2} \oint_{{\cal D}_i} y(p) {\rm d} p.
\end{equation}
\subsection{Large $N$ instantons and eigenvalue tunneling}
In the sum (\ref{genz}) over possible ``vacua" of the matrix model, any two vacua are related by a redistribution of the eigenvalues, i.e. by a sequence of operations of the form
\begin{equation}
\label{twovac}
N_i \rightarrow N_i-1, \qquad N_j \rightarrow N_j+1.
\end{equation}
This can be interpreted as a process in which an eigenvalue leaves the cut around the $i$-th critical point and ``tunnels" to the cut around the $j$-th critical point.
Therefore, given a choice of background or perturbative vacuum, all the other vacua in (\ref{genz}) (which are then regarded as instanton sectors) can be obtained from the reference background by {\it eigenvalue tunneling}. The fact that instantons in the matrix model are due to eigenvalue tunneling was first pointed out in \cite{david, shenker}.
Notice that, when an eigenvalue tunnels as in (\ref{twovac}), the r.h.s. of (\ref{limitZ}) changes, at leading order, by
\begin{equation}
\label{camm}
\exp\left( -{1\over g_s} (V(\lambda^\star_j) -V(\lambda^\star_i)) \right).
\end{equation}
We can think about this as the action of the classical instanton connecting the two vacua:
\begin{equation}
(\cdots, N_i, \cdots, N_j, \cdots) \to (\cdots, N_i-1, \cdots, N_j+1, \cdots).
\end{equation}
As we have explained in the previous section, there is a large $N$ instanton built upon this classical configuration.
The classical action (\ref{camm}) gets corrected to
\begin{equation}
\exp\left( -{1\over g_s} (V_{\rm eff} (\lambda^\star_j) -V_{\rm eff}(\lambda^\star_i)) \right),
\end{equation}
where the effective potential is defined in (\ref{veff}). This can be regarded as the action of the large $N$ instanton
connecting the vacua. It follows from (\ref{pdF}) and (\ref{cpot}) that the large $N$ instanton action can be computed in terms of the genus zero free energy as
\begin{equation}
V_{\rm eff} (\lambda^\star_j) -V_{\rm eff}(\lambda^\star_i)={\partial F_0 \over \partial t_i} -{\partial F_0 \over \partial t_j}.
\end{equation}
When $t\to 0$, we recover the classical instanton action (\ref{camm}).
The picture of eigenvalue tunneling is still valid for finite $t$, but now the tunneling is between
two cuts rather than between two saddle points. A graphical depiction of an $\ell$-eigenvalue tunneling in a cubic potential,
from the background $(N,0)$ to the background $(N-\ell, \ell)$, is shown in \figref{tunnel}.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=3cm]{cubictunnel.pdf}
\end{center}
\caption{Eigenvalue tunneling in a cubic matrix model.}
\label{tunnel}
\end{figure}
\subsection{Large $N$ instantons in the one-cut matrix model}
A particularly simple case of eigenvalue tunneling
occurs when the reference background is the one-cut configuration.
The elementary instanton configurations correspond in this case to tunnelings of the form
\begin{equation}
(N, 0, \cdots, 0) \rightarrow (N-1, 0, \cdots, 0, 1, 0, \cdots, 0).
\end{equation}
The large $N$ instanton action depends then only on $t=g_s N$ and it has the structure discussed in the
previous section. Since this is the simplest case, we will now explain
how to calculate instanton contributions when the reference background is a one-cut solution. It turns out that this computation can be done in
at least three different ways. The first method is based on a direct calculation of the matrix integral.
It was introduced by F. David in \cite{david} and further clarified in \cite{lvm,iy}. The calculation of \cite{david,lvm,iy} is done for the so-called double-scaled
matrix model, in which the 't Hooft parameter is near a critical value. The correct calculation of the instanton contribution at generic values of the 't Hooft parameter
was presented in \cite{mswone}. There is a second method, which regards the instanton sectors in the one-cut matrix model as limits of the generic multi-cut vacuum. This method has the advantage of giving general expressions for the $k$-th instanton sector, and it was presented in \cite{mswtwo}. Finally, a third method was introduced in \cite{mmnp}, based on the method of orthogonal polynomials, which makes it possible to calculate the instanton sectors as trans-series solutions to difference equations, and therefore it mimics the structure
developed for ODEs. We will present the three methods in turn, skipping some of the details which can be found in the original references.
\subsubsection{A direct calculation}
\label{direct-cal}
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=5cm]{mminstanton.pdf}
\end{center}
\caption{The effective potential for the one-cut matrix model. The point $x_0$ is a critical point where eigenvalues can tunnel to, leading to instanton configurations.}
\label{mmeffpot}
\end{figure}
We thus consider a one--cut matrix model in which the effective potential has the form depicted in \figref{mmeffpot}: it is constant along the location of the cut ${\cal C}=[a,b]$, and there is a critical point $x_0$ which corresponds to another possible vacuum. We will consider the instanton sectors corresponding to eigenvalue tunneling from the cut at ${\cal C}$ to the critical point $x_0$. These instanton sectors are labelled by a positive integer $\ell$, which is the number of tunneling eigenvalues. The total partition function, summing over all these instanton sectors, is given by
\begin{equation}
\label{z-trans}
Z(N)= Z^{(0)}(N) \left(1 + \sum_{\ell =1}^\infty C^{\ell} Z^{(\ell)}(N) \right),
\end{equation}
where for convenience we have implicitly normalized the sectors with $\ell\not=0$ by the perturbative, one-cut partition function $Z^{(0)}(N)$, and $C$ is a constant. Notice that this total partition function has the trans-series structure considered in section \ref{ODEs}. The free energy also has a trans-series structure,
\begin{equation}
\label{f-trans}
F(N) = F^{(0)}(N) + \log\left(1 + \sum_{\ell =1}^\infty C^{\ell} Z^{(\ell)}(N)\right)=\sum_{\ell =0}^\infty C^{\ell} F^{(\ell)}(N).
\end{equation}
The instanton partition functions $Z^{(\ell)}(N)$ can be computed directly in terms of the defining matrix integral: we consider an integral in which $N-\ell$ eigenvalues are located in the cut ${\cal C}$, while for the $\ell$ eigenvalues which have tunneled, the integration is made around a steepest-descent contour passing through the critical point. In particular, for the one-instanton sector one finds \cite{lvm}
\begin{equation}\label{onemm}
Z^{(1)}(N) = {N \over N! (2\pi)^N Z^{(0)}(N)} \int_{x\in {\cal I}} {\rm d} x \, {\rm e}^{-{1\over g_s} V (x)}
\int_{\lambda \in {\cal I}_0} \prod_{i=1}^{N-1}{\rm d} \lambda_i\, \Delta^2 (x, \lambda_1, \ldots, \lambda_{N-1})\, {\rm e}^{-{1\over g_s} \sum_{i=1}^{N-1} V (\lambda_i)},
\end{equation}
\noindent
where the first integral in $x$ is around the critical point at $x_0$, along a saddle-point contour which we have denoted by ${\cal I}$. The rest of the $N-1$ eigenvalues are integrated around the contour ${\cal I}_0$ corresponding to the background one-cut configuration. The overall factor of $N$ in front of the integral is a symmetry factor, counting the $N$ possible distinct ways of choosing one eigenvalue out of a set of $N$. One can easily write similar integrals for the $\ell$--instanton contribution, but these will be easier to calculate with the second method below. The integral (\ref{onemm}) can be written as
\begin{equation}\label{oneinstex}
Z^{(1)}(N) = {1\over 2\pi}\, {Z^{(0)}(N-1) \over Z^{(0)}(N)} \int_{x \in {\cal I}} {\rm d} x\, f(x),
\end{equation}
where
\begin{equation}\label{fx}
f(x) = \left\langle \det (x {\bf 1} - M')^2 \right\rangle^{(0)}_{N-1}\, {\rm e}^{-{1\over g_s} V(x)}.
\end{equation}
The notation in these equations is as follows. The average is defined as
\begin{equation}
\left\langle {\cal O} \right\rangle^{(0)}_N = {\int_{\lambda\in {\cal I}_0} \prod_{i=1}^N {\rm d} \lambda_i\, \Delta^2(\lambda)\, {\cal O}(\lambda)\, {\rm e}^{-{1\over g_s} \sum_{i=1}^N V(\lambda_i)} \over \int_{\lambda\in {\cal I}_0} \prod_{i=1}^N {\rm d} \lambda_i\, \Delta^2(\lambda)\, {\rm e}^{-{1\over g_s} \sum_{i=1}^N V(\lambda_i)}}
\end{equation}
and it is calculated again in the one-cut background. In (\ref{fx}), $M'$ is an $(N-1) \times (N-1)$ Hermitian matrix.
We conclude that the one-instanton sector can be evaluated by calculating correlators in the perturbative sector.
In fact, by making use of the familiar relation
\begin{equation}
\det (x {\bf 1} - M) = \exp\left({\rm tr}\,{\rm ln} (x {\bf 1}-M)\right)
\end{equation}
we obtain
\begin{equation}\label{conex}
\left\langle \det (x {\bf 1} - M)^2 \right\rangle = \exp \left[ \sum_{s=1}^{\infty} {2^s\over s!} \left\langle \left( {\rm tr}\, {\rm ln} (x {\bf 1} - M) \right)^s \right\rangle_{(\rm c)} \right],
\end{equation}
\noindent
which is written in terms of connected correlation functions. The correlation functions appearing in (\ref{conex}) are nothing but integrated versions of the $W_{h}$ correlators in (\ref{wcor}), evaluated at coincident points. Let us define
\begin{equation}\label{theas}
\begin{aligned}
A_{g,h} (x;t) &= \left. \int^{x_1} {\rm d} p_1 \cdots \int^{x_h} {\rm d} p_h\, W_{g,h} (p_1,\cdots, p_h) \right|_{x_1=\cdots =x_h=x}, \\
{\cal A}_n(x;t) &= \sum_{k=0}^{\left[\frac{n}{2}\right]} \frac{2^{n-2k+1}}{(n-2k+1)!}\, A_{k,n-2k+1} (x;t), \quad n\ge 0.
\end{aligned}
\end{equation}
\noindent
In this notation, the general perturbative formula for the determinant reads
\begin{equation}
\left\langle \det ( x \mathbf{1} - M )^2 \right\rangle = \exp \left( \sum_{n=0}^{\infty} g_s^{n-1} {\cal A}_n (x;t) \right),
\end{equation}
\noindent
where ${\cal A}_n(x;t)$ is the $n$--loop contribution. We have, for example,
\begin{equation}
{\cal A}_0(x;t) = 2 A_{0,1} (x;t), \qquad
{\cal A}_1(x;t) = 2 A_{0,2} (x;t).
\end{equation}
The integration constants involved in the integrations in (\ref{theas}) may be simply fixed by looking at the large $x$ expansion of the correlators.
Next, we define the \textit{holomorphic} effective potential, which combines the matrix model potential together with ${\cal A}_0(x;t)$, as
\begin{equation}\label{vheff}
V_{\rm h,eff}(x;t) = V(x) - 2t \int^x {\rm d} p\, \omega_0 (p) = V(x) - 2t \int {\rm d} p\, \rho(p) \log (x -p).
\end{equation}
It satisfies
\begin{equation}\label{dery}
V_{\rm h,eff}' (x;t) = y(x)
\end{equation}
\noindent
as well as
\begin{equation}
{\rm Re}\, V_{\rm h,eff}(x;t) = V_{\rm eff}(x),
\end{equation}
\noindent
where $V_{\rm eff}(x)$ was earlier defined in (\ref{veff}). Altogether, one finally has for the integrand
\begin{equation}\label{fint}
f(x) = \exp \left( - \frac{1}{g_s} V_{\rm h,eff} (x;t') + \sum_{n=1}^{\infty} g_s^{n-1} {\cal A}_n (x;t') \right),
\end{equation}
\noindent
where
\begin{equation}
t'=g_s(N-1)=t-g_s.
\end{equation}
\noindent
This shift in the 't~Hooft parameter is due to the fact that the correlation function involved in (\ref{fx}) is computed in a matrix model with $N-1$
eigenvalues (recall we removed one eigenvalue from the cut). Since we are computing the one--instanton contribution in the theory with $N$ eigenvalues, we thus have to expand (\ref{fint}) around $t$. This gives further corrections in $g_s$, and one finds
\begin{equation}\label{fvphi}
f(x) = \exp \left( - \frac{1}{g_s} V_{\mathrm{h, eff}} (x) + \Phi (x) \right),
\end{equation}
with
\begin{equation}
\Phi (x) \equiv \sum_{n=1}^{\infty} g_s^{n-1}\, \Phi_{n} (x)= \sum_{n=1}^{\infty} g_s^{n-1} \left[ \frac{(-1)^{n-1}}{n!}\, \partial_t^n V_{\mathrm{h,eff}} (x) + \sum_{k=0}^{n-1} \frac{(-1)^k}{k!}\, \partial_t^k {\cal A}_{n-k} (x) \right].
\end{equation}
One has, for example,
\begin{equation}
\label{phi-one}
\Phi_1 (x) = {\cal A}_1 (x) + \partial_t V_{\mathrm{h,eff}} (x).
\end{equation}
Further explicit results for these quantities can be found in \cite{mswone}. In the expression (\ref{fvphi}) all quantities now depend on the standard 't~Hooft parameter $t$ for
the model with $N$ eigenvalues, and we have thus dropped the explicit dependence on $t$. The derivatives with respect to $t$ can be performed by using standard
results in one-cut matrix models, some of which were listed above. One may now proceed with the integration of $f(x)$,
\begin{equation}\label{fxint}
\int_{x \in {\cal I}} {\rm d} x\, \exp \left( - \frac{1}{g_s} V_{\mathrm{h,eff}} (x) + \Phi (x) \right).
\end{equation}
\noindent
If we wish to evaluate this integral as a perturbative expansion when $g_s$ is small, we can do it using a saddle--point evaluation \cite{lvm,iy}. The saddle--point condition
\begin{equation}
V'_{\mathrm{h,eff}} (x_0) = 0
\end{equation}
requires $x_0$ to be a critical point of the effective potential, as anticipated above.
If we use the explicit form of the spectral curve (\ref{scurve}) we find the equivalent condition
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=4.5cm]{riemanninst.pdf}
\end{center}
\caption{The spectral curve $y(x)$ has a singular point at the nontrivial saddle $x_0$.}
\label{riemanninst}
\end{figure}
\begin{equation}\label{mx}
M(x_0) = 0.
\end{equation}
Geometrically, the spectral curve is a curve of genus zero pinched at $x_0$, as shown in \figref{riemanninst}. This was observed in \cite{seishi} in the context of spectral curves for double--scaled matrix models, and their relation with minimal strings (see also \cite{kk}). Of course, it can happen that there are many different critical points of the effective potential, therefore more than one solution to (\ref{mx}). In this case, there will be various instantons and we will have to add up their contributions (the leading contribution arising from the instanton with the smallest action, in absolute value).
The calculation of (\ref{fxint}) is now completely standard, and it reduces to Gaussian integrations. The result is
\begin{equation}\label{fexpansion}
\int_{x\in {\cal I}} {\rm d} x\, f(x) = \sqrt{\frac{2 \pi g_s}{V''_{\mathrm{h,eff}} (x_0)}}\, \exp \left( - \frac{1}{g_s} V_{\mathrm{h,eff}} (x_0) + \Phi_1 (x_0) \right) \left( 1 + \sum_{n=2}^{\infty} g_s^n\, f_n \right),
\end{equation}
\noindent
where the $f_n$ can be systematically computed in terms of the functions $\Phi_n(x)$ and their derivatives, evaluated at the saddle--point $x_0$. An explicit expression for $f_2$ can be found in \cite{mswone}.
With these ingredients, we can already calculate the one-instanton contribution to the free energy. Using (\ref{f-trans}), we find
\begin{equation}
F^{(1)}(N)=Z^{(1)}(N)={1\over 2\pi} \frac{Z^{(0)}(N-1)}{Z^{(0)}(N)} \int_{x\in {\cal I}} {\rm d} x\, f(x).
\end{equation}
The quotient of perturbative partition functions is easy to calculate in terms of the free energies,
\begin{equation}
\frac{Z^{(0)}(N-1)}{Z^{(0)}(N)} = \exp \left( F(t')-F(t) \right)= \exp \left( \sum_{n=0}^{\infty} g_s^{n-1} {\cal G}_n \right),
\end{equation}
where
\begin{equation}\label{quotex}
{\cal G}_n \equiv \sum_{k=0}^{\left[ \frac{n}{2} \right]} \frac{(-1)^{n-2k+1}}{(n-2k+1)!}\, \partial_t^{n-2k+1} F_k (t).
\end{equation}
\noindent
One has, for example,
\begin{equation}\label{gex}
{\cal G}_0 =- \partial_t F_0 (t), \qquad
{\cal G}_1 = \frac{1}{2}\, \partial_t^2 F_0 (t).
\end{equation}
\noindent
Putting together (\ref{fexpansion}) and (\ref{quotex}) above, we finally find that $F^{(1)}$ has the structure
\begin{equation}\label{muex}
F^{(1)} = {\rm i}\, g_s^{1/2}\, F_{1,1}\, \exp \left( -\frac{A}{g_s} \right) \left\{ 1 + \sum_{n=1}^{\infty}F_{1,n+1} g_s^n \right\},
\end{equation}
where, up to one loop, we have:
\begin{equation}
\begin{aligned}
A &= V_{\mathrm{h,eff}} (x_0) - {\cal G}_0 (t), \\
F_{1,1} &= -{\rm i}\, \sqrt{\frac{ 1}{2 \pi V''_{\mathrm{h,eff}} (x_0)}}\, \exp \Big( \Phi_1 (x_0) + {\cal G}_1(t) \Big).
\end{aligned}
\end{equation}
We can give explicit expressions for all the quantities involved in (\ref{muex}) in terms of data which depend only on the spectral curve (\ref{scurve}). First of all, by using (\ref{gex}), (\ref{fzeroders}) and (\ref{dery}) we find
\begin{equation}\label{instdiff}
A = V_{\rm h,eff}(x_0) - V_{\rm h,eff}(b) = \int_b^{x_0} {\rm d} z \, y(z),
\end{equation}
\noindent
which is the instanton action (here, we use the fact that $V_{\rm h,eff}(b)=V_{\rm eff}(b)$). As pointed out in \cite{seishi}, this expression also has a geometric interpretation as the contour integral of the one--form $y(z)\, {\rm d} z$, from the endpoint of the cut ${\cal C}$ to the singular point $x_0$ (see \figref{riemanninst} and \figref{curves}, left side). To compute $F^{(1)}$ up to one--loop, we must compute $\Phi_1(x)$, given in (\ref{phi-one}). One can find the result for $A_{0,2}(x;t)$ (which enters in the expression of ${\cal A}_1$) simply by integrating the first formula in (\ref{annmm}) \cite{iy}
\begin{equation}\label{vann}
A_{0,2}(x;t) = \log \biggl( 1 + \frac{x - (a+b)/2} {\sqrt{(x - a)(x - b)}} \biggr) - \log 2.
\end{equation}
\noindent
Using that (see for example \cite{dfgzj})
\begin{equation}\label{derom}
{\partial (t \omega_0(p)) \over \partial t}={1\over {\sqrt {(p-a)(p-b)}}},
\end{equation}
one further finds,
\begin{equation}\label{dervheff}
\partial_t V_{\rm h,eff}(x) = -4 \log \Bigl[ {\sqrt{x-a}} + {\sqrt{x-b}} \Bigr] +4 \log 2,
\end{equation}
and one then obtains
\begin{equation}
\Phi_1(x) = - \log \Bigl[ (x-a)(x-b) \Bigr].
\end{equation}
\noindent
Adding to $\Phi_1(x)$ the result for ${\cal G}_1(t)$, which follows from (\ref{fzeroders}), it is simple to put all expressions together
and obtain the contribution, $F_{1,1}$, of the one--loop fluctuations around the one--instanton configuration,
\begin{equation}\label{rone}
F_{1,1} = -{\rm i}\, {b-a \over 4} {\sqrt{1 \over 2 \pi M'(x_0) \Bigl[(x_0-a)(x_0-b)\Bigr]^{5\over 2}}}.
\end{equation}
\noindent
This formula is valid for any one--cut matrix model where the potential has an extra critical point $x_0$. Notice that if $x_0$ is a local maximum of $V_{\rm eff}(x)$, one will have that $M'(x_0)<0$, and hence $F_{1,1}$ will be real. This result extends previous calculations in \cite{david,lvm} to one-cut matrix models away from the critical point.
\begin{example} {\it Large $N$ instanton in the quartic matrix model}. The quartic matrix model is defined by the potential
\begin{equation}
\label{quarticpot}
V(x) = {1\over 2} x^2 -{\lambda\over 48} x^4,
\end{equation}
where we follow the normalization of \cite{mmnp}. This potential has three critical values, namely
\begin{equation}
\label{critical}
x=0, \qquad x=\pm {2 {\sqrt{3}} \over {\sqrt{\lambda}}},
\end{equation}
so the generic matrix model based on this potential has three cuts. We can however consider
the one-cut model where the eigenvalues sit around the origin $x=0$. The spectral curve for this one-cut background can be easily obtained
with the standard matrix model techniques reviewed above. It is given by
\begin{equation}
y=M(x) {\sqrt {x^2-4 \alpha^2}}
\end{equation}
where the moment function is
\begin{equation}
M(x)=1-{\lambda \over 6} \alpha^2 - {\lambda \over 12} x^2.
\end{equation}
The cut is located at ${\cal C}=[-2\alpha, 2\alpha]$, and the endpoints are determined by
\begin{equation}
\label{asq}
\alpha^2={2\over \lambda} \biggl( 1- {\sqrt { 1 - t \lambda}}\biggr).
\end{equation}
The moment function has two zeros which give two non--trivial saddle--points of the effective potential, namely $\pm x_0$ with
\begin{equation}\label{qsaddle}
x_0^2 ={12 \over \lambda} -2 \alpha^2.
\end{equation}
As $t \rightarrow 0$, $\alpha \rightarrow 0$ and they become the two non-trivial critical points of the potential in (\ref{critical}).
Since the potential is symmetric, there are {\it two} one-instanton solutions, corresponding to one eigenvalue tunneling from ${\cal C}$ to the two saddles $\pm x_0$. Both instantons have the same action, which is computed by integrating the spectral curve as in (\ref{instdiff}),
\begin{equation}
\label{quartic-inst}
\begin{aligned}
A(t)&= {3\over \lambda } \sqrt{\left(1-\frac{\alpha ^2 \lambda }{2}\right)
\left(1-\frac{\alpha ^2 \lambda }{6}\right)}\\
&-\frac{1}{4} \alpha ^2
\left(\alpha ^2 \lambda -4\right) \left(\log
\left(\frac{\alpha ^2 \lambda }{3}\right)-2 \log
\left(\sqrt{1-\frac{\alpha ^2 \lambda }{2}}+\sqrt{1-\frac{\alpha ^2
\lambda }{6}}\right)\right).
\end{aligned}
\end{equation}
This function has the small $t$ expansion
%
\begin{equation}
A(t)={3\over \lambda}+t \left( -1+ \log \left( {\lambda t \over 12} \right) \right) +{\cal O}(t^2).
\end{equation}
The first term (i.e. the instanton action as $t\rightarrow 0$) is given by
\begin{equation}
V\left( {2 {\sqrt{3}} \over {\sqrt{\lambda}}} \right) -V(0),
\end{equation}
in agreement with (\ref{camm}). The structure of the next-to-leading correction is in agreement with the general expectation (\ref{exlna}).
\end{example}
\begin{example} {\it Large $N$ instanton in the cubic matrix model}. Let us consider the matrix model analogue of the Airy integral (\ref{lamint}). The potential is
\begin{equation}
\label{stokespot}
V(x)= -{\rm e}^{{\rm i} \kappa} x + {x^3 \over 3}.
\end{equation}
This has two critical points
\begin{equation}
x^{{\rm L}, {\rm R}}=\mp \zeta^{1/2}
\end{equation}
where we have introduced the variable
\begin{equation}
\zeta={\rm e}^{{\rm i} \kappa}.
\end{equation}
The most general background of a matrix model based on this potential
is a two-cut configuration, labelled by $(N_1, N_2)$, where $N_1$, $N_2$ are the number of eigenvalues near the critical points $x^{{\rm L}, {\rm R}}$, respectively.
Let us consider the one-cut background where all the eigenvalues sit near $x^{\rm L}$, i.e.
\begin{equation}
(N_1, N_2)=(N,0).
\end{equation}
The endpoints of the cut $(a,b)$ are determined by the equations
\begin{equation}
x_0(x_0^2-\zeta)=t, \qquad \delta^2=2(\zeta-x_0^2),
\end{equation}
where
\begin{equation}
a=-x_0+\delta, \qquad b=-x_0-\delta.
\end{equation}
The spectral curve is given by
\begin{equation}
y(x) =(x-x_0) {\sqrt{x^2+2xx_0 + 3x_0^2-2\zeta}}.
\end{equation}
The effective potential is (up to a constant) the integral of $y(x)$, which gives
\begin{equation}
\begin{aligned}
V_{\rm eff}(x)&=\frac{1}{3}\left( x (x -x_0) -2 \zeta\right) {\sqrt{x^2+2xx_0 + 3x_0^2-2\zeta}} \\
&-2 x_0 \left(x_0^2-\zeta \right) \log \left(x_0+x+{\sqrt{x^2+2xx_0 + 3x_0^2-2\zeta}}\right).
\end{aligned}
\end{equation}
%
The instanton action is given by
%
\begin{equation}
A=\int_b^{x_0} y(x) {\rm d} x =V_{\rm eff}(x_0)-V_{\rm eff}(b).
\end{equation}
%
It has the small $t$ expansion
%
\begin{equation}
A=-{4\over 3} \zeta^{3/2}+t \left( -1+ \log {t\over 8 \zeta^{3/2}} \right) +{\cal O}(t^2),
\end{equation}
and as expected the leading term as $t\rightarrow 0$ is given by (\ref{camm}),
\begin{equation}
-{4\over 3}\zeta^{3/2} =V(x^{\rm R})-V(x^{\rm L}).
\end{equation}
\end{example}
\subsubsection{Large $N$ instantons from multi-cuts}
\label{multi-multi}
It is clear that the instanton configurations in the one-cut matrix model that we have just analyzed, in which $\ell$ eigenvalues tunnel to another
critical point, can be regarded as particular cases of a {\it two-cut} configuration with
\begin{equation}
N_1=N-\ell, \qquad N_2=\ell, \qquad \ell\ll N.
\end{equation}
Therefore, we should be able to calculate the general instanton partition function $Z_N^{(\ell)}$ as a particular case of a two-cut partition function, i.e. we expect
\begin{equation}
\label{onefromtwo}
Z^{(\ell)}(N)= Z(N-\ell, \ell).
\end{equation}
This was made explicit in \cite{mswtwo} and gives compact formulae for the general $\ell$-instanton sector of the one-cut matrix model.
We can evaluate the r.h.s. of (\ref{onefromtwo}) by expanding the free energy of the two-cut matrix model
\begin{equation}
F\left(t_1=t-g_s\ell, t_2=g_s \ell \right)
\end{equation}
in powers of $g_s$. There is however one subtlety: the free energies $F_g(t_1, t_2)$ are \textit{not} analytic at $t_2=0$.
Geometrically, this corresponds to the fact that we are expanding around a configuration in which the second cut of the
spectral curve is completely pinched, as shown in \figref{riemanninst}. However, as we mentioned in (\ref{g-sub}),
this non-analyticity is due only to the Gaussian contribution to the two-cut integral.
Therefore, the functions $\widehat F_g (t_1, t_2)$ defined by
\begin{equation}
F_g(t_1, t_2) = F^{\rm G}_g(t_2) + \widehat F_g(t_1, t_2),
\end{equation}
\noindent
where $F^{\rm G}_g(t)$ are the genus $g$ Gaussian free energies written down in (\ref{gaussiang}), are analytic at $t_2=0$.
Physically, the reason for the appearance of this singularity is that in this problem $t_2=\ell g_s$, and $\ell$ is small as compared to $N$.
Therefore, it is not appropriate to treat the integration over the $\ell$ tunneling eigenvalues from the point of view of the large $N$
expansion. Instead, they should be integrated exactly. This argument also suggests that, in order to regularize the computation,
we should subtract $F^{\rm G}(t_2)$ from the total free energy and at the same time multiply $Z^{(\ell)}$ by the
{\it exact} partition function $Z^{\rm G}_\ell$, which is given in (\ref{gaussianN}). The appropriate expression
for the partition function around the $\ell$--instanton configuration is then
\begin{equation}\label{interzn}
Z^{(\ell)} = Z^{\rm G}_\ell\, \exp \biggl[ \sum_{g\ge 0} g_s^{2g-2} \left( \widehat F_g (t-\ell g_s, \ell g_s) - F_g (t) \right)\biggr].
\end{equation}
By expanding in $g_s$, this expression leads to the general formula
%
\begin{equation}
\label{npboundary}
\begin{aligned}
Z^{(\ell)}_N=& {g_s^{\ell^2/2} \over (2\pi)^{\ell/2}}\, G_2(\ell+1)\, \, \hat q^{\frac{\ell^2}{2}}\, \exp \left( - \frac{\ell A}{g_s} \right) \\
& \times \sum_{k} \sum_{m_i>0}\sum_{g_i>1-{m_i\over 2}} {g_s^{\sum_i (2g_i+m_i-2)}\over k! m_1!\,\dots\, m_k!}\,\,\,
\widehat F_{g_1}^{(m_1)}\dots \widehat F_{g_k}^{(m_k)} (-\ell)^{\sum_i m_i}.
\end{aligned}
\end{equation}
In this equation we have introduced the following notations: the $m$-th derivative
of $\widehat F_g$ is taken w.r.t. the variable $s$, which is defined as
\begin{equation}
\label{average}
s={1\over 2}(t_1 -t_2).
\end{equation}
All derivatives are evaluated at $t_1=t$ and $t_2=0$. The exponential factor involves the instanton action, which is given by
\begin{equation}\label{hata}
A(t) = \partial_{s} \widehat F_0
\end{equation}
and
\begin{equation}
\hat q = \exp \Bigl( \partial_s^2 \widehat F_0 \Bigr).
\end{equation}
At leading order in $g_s$, we have
\begin{equation}
\begin{aligned}
&\sum_{k} \sum_{m_i>0}\sum_{g_i>1-{m_i\over 2}} {g_s^{\sum_i (2g_i+m_i-2)}\over k! m_1!\,\dots\, m_k!}\,\,\, \widehat F_{g_1}^{(m_1)}\dots \widehat F_{g_k}^{(m_k)} (-\ell)^{\sum_i m_i}
\\
&=1 - g_s \Bigl( \ell\, \partial_{s} \widehat F_1 (t) + {\ell^3 \over 6}\, \partial_s^3 \widehat F_0 (t) \Bigr) + {\cal O}(g_s^2).
\end{aligned}
\end{equation}
By using the general formulae for the free energies in the two-cut model, one can check \cite{mswtwo}
that (\ref{npboundary}) reproduces the one-instanton contribution to the free energy
computed in the previous subsection and generalizes it to arbitrary $\ell$.
\subsubsection{Large $N$ instantons from orthogonal polynomials}
\label{diff-matrix}
So far we have analyzed the ``perturbative" sector of the one-cut matrix model from the point of view of the spectral curve: the procedure explained in \cite{ce,eo} gives
a systematic way to compute the ``perturbative" $1/N$ corrections in terms of data of this curve, and the results reviewed above
show that the formal trans-series giving the instanton corrections can be also computed
using data of the spectral curve. For example, the instanton action is given by a period of the
differential $y(x) {\rm d} x$ along a cycle, and the loop corrections can be computed from
the knowledge of the curve only.
However, in the one-cut case there is a completely different way to obtain the $1/N$ corrections in the perturbative sector,
by using orthogonal polynomials \cite{bessis,biz} (see \cite{alvarezmedina} for some recent developments). It was shown in \cite{mmnp} that this procedure can be generalized to
compute systematically multi-instanton effects in the one-cut sector (partial results in this direction can be found in \cite{lattice,akk,sy}).
This technique is very powerful in order to go to higher loops in simple matrix models. In addition, it is formally very similar to the trans-series
method that we developed in section \ref{ODEs}. Therefore, it makes it possible to apply many of the techniques typical of the theory of resurgence (the connection
to resurgence was further developed in \cite{asv}).
Another advantage of this method is that it can be applied to unitary matrix models.
The basic idea of the technique of orthogonal polynomials is the following (see \cite{dfgzj,mmleshouches} for reviews): in the matrix integral (\ref{zmm}), if we regard
\begin{equation}
{\rm d}\mu = {\rm e}^{-{1\over g_s} V(\lambda)} {{\rm d}\lambda \over 2 \pi}
\end{equation}
as a measure in ${\mathbb R}$, we can introduce orthogonal polynomials $p_n(\lambda)$ defined by
\begin{equation}
\int {\rm d}\mu \, p_n(\lambda) p_m(\lambda) = h_n \delta_{nm},\quad n\ge 0,
\label{ortho}
\end{equation}
where $p_n(\lambda)$ are normalized by requiring the behavior $p_n(\lambda)=\lambda^n +\cdots$.
One then easily finds,
\begin{equation}
\label{parth}
Z =\prod_{i=0}^{N-1} h_i = h_0^N \prod_{i=1}^N r_i^{N-i},
\end{equation}
where we have introduced the coefficients
\begin{equation}
\label{rcoeff}
r_k= {h_k \over h_{k-1}}, \qquad k\ge 1,
\end{equation}
which appear in the recursion relations for the $p_n(\lambda)$,
\begin{equation}
\label{recurs}
(\lambda + s_n ) p_n(\lambda) = p_{n+1}(\lambda) + r_n p_{n-1}(\lambda).
\end{equation}
In this subsection $F$ will denote the normalized free energy, which is obtained by subtracting the Gaussian free energy,
\begin{equation}
F=\log Z -\log Z^{\rm G}.
\end{equation}
At finite $N$, $F$ is given by the following formula:
\begin{equation}
g_s^2 F= {t^2 \over N} \log {h_0\over h_0^G} + {t^2
\over N} \sum_{k=1}^N \biggl( 1-{k\over N} \biggr)
\log {r_k \over k g_s},
\label{allf}
\end{equation}
where $h_0^G$ is the coefficient $h_0$ for the Gaussian model.
In order to proceed, we introduce a continuous variable as $N \rightarrow \infty$,
\begin{equation}
\label{continuumvar}
g_s k \rightarrow z, \qquad 0\le z \le t,
\end{equation}
and we assume that in this continuum, $N \rightarrow \infty$ limit, $r_k$ becomes a function of $z$ and $g_s$,
\begin{equation}
r_k \rightarrow R (z,g_s).
\end{equation}
It will be useful to consider the function
\begin{equation}
\Xi(z,g_s) = {R(z,g_s) \over z}
\end{equation}
which can be regarded as the continuum limit of $r_k/(k g_s)$. It is easy to see that, for polynomial potentials of the form
\begin{equation}
V(M) = {1\over 2}M^2 +\cdots,
\end{equation}
one has $r_k \sim k g_s +\cdots$, therefore the function $\log (r_k/(k g_s))$ is regular at $k=0$ and we
can use the standard Euler--Maclaurin summation formula to evaluate (\ref{allf}). One then obtains \cite{bessis,biz}:
\begin{equation}
\label{ofgex}
\begin{aligned}
g_s^2 F &= \int_0^t {\rm d} z\, (t-z) \log \Xi (z,g_s)+ \sum_{p=1}^{\infty} g_s^{2p}\, \, {B_{2p} \over (2p)!}\, \frac{{\rm d} ^{2p-1}}{{\rm d} z^{2p-1}} \biggl[ \left( t-z \right) \log \Xi(z,g_s) \biggr] \bigg|_{z=0}^{z=t} \\
&+ {t g_s \over 2 } \biggl[ 2 \log {h_0 \over h_0^{\rm G}} - \log \Xi (0,g_s)\biggr].
\end{aligned}
\end{equation}
From this expression one can deduce the difference equation,
\begin{equation}
\label{diffeq}
F(t+g_s) + F(t-g_s)-2 F(t) =\log \Xi,
\end{equation}
which can be also obtained by starting from the identity
\begin{equation}
{Z_{N+1} Z_{N-1} \over Z_N^2} =r_N.
\end{equation}
In order to compute the $g_s$ expansion of the free energy, one has to first find an expansion for the function $R(z,g_s)$ of the form
\begin{equation}
\label{zeroex}
R^{(0)}(z,g_s) = \sum_{s=0}^{\infty} g_s^{2s} R_{0,2s}(z).
\end{equation}
Once this expansion is plugged in $\Xi(z,g_s)$ and then in (\ref{ofgex}), the genus expansion follows.
In order to obtain (\ref{zeroex}) one has to use the so-called
{\it pre-string equation}. This is a difference
equation for $R(z,g_s)$ which can be derived as the continuum limit of the recursion relations obeyed by the coefficients (\ref{rcoeff}).
The pre-string equation can be explicitly written for any polynomial potential \cite{biz,dfgzj}. For example, in the
case of the quartic matrix model with potential (\ref{quarticpot}), the difference equation for $R(z,g_s)$ reads as
\begin{equation}
\label{diff}
R(z,g_s)\Bigl\{ 1 -{\lambda\over 12} \bigl( R(z,g_s) + R(z+g_s,g_s) + R(z-g_s,g_s) \bigr) \Bigr\}=z.
\end{equation}
These types of equations have a solution of the form (\ref{zeroex}), and they determine $R_{0,s}(z)$ in terms of the $R_{0,s'}(z)$, $s'<s$.
When this solution is plugged in (\ref{diffeq}), one obtains the perturbative expansion of the total free energy in powers of $g_s$,
which is the standard $1/N$ expansion of the matrix model \cite{bessis,biz}.
Difference equations, just like differential equations, admit
trans-series solutions, and one could guess that the trans-series solution to the difference equation governing
$R(z,g_s)$ encodes the multi-instanton amplitudes of the full matrix model. This was first proposed in \cite{mmnp}. It was then verified that the results obtained
with this technique agree with the previous techniques in the quartic matrix model \cite{mmnp,asv} and in the cubic matrix model \cite{mswtwo}.
To obtain the trans-series solutions to the pre-string equation, we consider a more general ansatz than (\ref{zeroex}),
\begin{equation}
\label{transr}
R(z,g_s) = \sum_{\ell=0}^{\infty} C^{\ell} R^{(\ell)}(z,g_s),
\end{equation}
where $R^{(0)}(z,g_s)$ is given by (\ref{zeroex}), and for $\ell\ge1$ we have
\begin{equation}
\label{kpert}
R^{(\ell)}(z,g_s)= {\rm e}^{-\ell A(z)/g_s} R_{\ell,1}(z) \Bigl( 1+\sum_{n=1}^{\infty} g_s^{n} R_{\ell,n+1}(z) \Bigr),\qquad \ell \ge 1.
\end{equation}
%
Once this ansatz is plugged in the difference equation for $R(z,g_s)$, one obtains a recursive system of equations for the different quantities involved. The
quantity $A(z)$, which is a parameter-dependent instanton action, is determined by an equation of the form
%
\begin{equation}
A'(z) =f(R_{0,0}(z)),
\end{equation}
%
where $f$ is a function fixed by the difference equation. For $\ell=1, n>0$, one
obtains an equation which determines
%
\begin{equation}
{{\rm d} R_{1,n} (z) \over {\rm d} z}
\end{equation}
in terms of $R_{1,n'}(z)$ with $n'<n$. For $n=1$, we have a differential equation for the logarithmic derivative, i.e. for
\begin{equation}
{1\over R_{1,1} (z)} {{\rm d} R_{1,1} (z) \over {\rm d} z}.
\end{equation}
The integration constant for $R_{1,1}(z)$ can be reabsorbed in the parameter $C$, and for $A(z)$ and the $R_{1,n} (z)$, $n>1$ the integration constants are fixed by using
appropriate boundary conditions. For $\ell>1$, the difference equation determines $R_{\ell,n}$ in terms of $R_{\ell',n'}$ with $\ell'<\ell$.
The trans-series structure of $R(z,g_s)$ leads to a trans-series structure for the full free energy, as in (\ref{f-trans}). We will write it as
\begin{equation}
\label{fullf}
F(t,g_s) = \sum_{\ell=0}^{\infty} C^{\ell} F^{(\ell)}(t,g_s),
\end{equation}
where
\begin{equation}
\label{fkinst}
F^{(\ell)}(t,g_s)= {\rm e}^{-\ell A(t)/g_s} F_{\ell,1}(t) \Bigl( 1 +\sum_{n=1}^{\infty} g_s^{n} F_{\ell,n+1}(t)\Bigr), \qquad \ell\ge 1.
\end{equation}
Once (\ref{transr}) is known, one can plug it in (\ref{diffeq}) to deduce the $F^{(\ell)}(t,g_s)$, see \cite{mmnp} for more details.
The resulting amplitude is nothing but the $\ell$-instanton
amplitude of the full matrix model.
\begin{example} {\it The quartic matrix model}. As an example, let us present some results for multi-instanton corrections
in the quartic matrix model with
the potential (\ref{quarticpot}). The perturbative solution (\ref{zeroex}) has been much studied since
it was first worked out in the pioneering papers \cite{bessis,biz}. The planar part is given by
\begin{equation}
R_{0,0}(z)={2\over \lambda} \Bigl(1-{\sqrt{1-\lambda z}}\Bigr).
\end{equation}
As already noticed in \cite{biz}, it turns out to be useful to express all results in terms of
\begin{equation}
r=R_{0,0}(z).
\end{equation}
For the higher $g_s$ corrections one finds,
\begin{equation}
\begin{aligned}
R_{0,2}(z)&={2 \lambda^2 \over 3} {r\over (2-\lambda r)^4},\\
R_{0,4}(z)&={28 \lambda^4\over 9} {r (5 + \lambda r) \over (2- \lambda r)^9},\\
R_{0,6}(z)&= {4 \lambda^6 \over 27} \frac{r \left(111 \lambda^2 r^2+5728 \lambda r+7700\right)}{(2-\lambda r)^{14}},
\end{aligned}
\end{equation}
and so on. If we now plug the trans-series ansatz (\ref{transr}) in the difference equation (\ref{diff}), we find a system of recursive difference equations
for the $R^{(\ell)}(z,g_s)$ which can be solved by using the ansatz (\ref{kpert}). Let us focus on
$\ell=1$, the one-instanton solution. The first thing to compute is $A(z)$, which
corresponds physically to the instanton action. One finds, at leading order in $g_s$,
\begin{equation}
\cosh (A'(z)) = 2 {3-\lambda r \over \lambda r}.
\end{equation}
This can be integrated to find $A(z)$ up to an additive constant and an overall sign (since $\cosh\, z$ is even). Since $z$ stands here for the
't Hooft parameter, both
ambiguities can be fixed by requiring that, as $z\rightarrow 0$, the instanton action becomes (\ref{camm}). The result is
\begin{equation}
\begin{aligned}
A(z)&= - \int {\rm d} r \cosh^{-1} \Bigl( 2 {3-\lambda r \over \lambda r}\Bigr) \Bigl(1-{\lambda r\over 2}\Bigr)\\
&=\frac{1}{4} r (\lambda r-4) \cosh ^{-1}\left(\frac{6}{\lambda r}-2\right)+{1\over 2 \lambda}
\sqrt{3 (2- \lambda r)(6-\lambda r)}.
\end{aligned}
\label{quarticinstanton}
\end{equation}
%
It can be checked that (\ref{quarticinstanton}) coincides with the instanton action
of the quartic matrix model computed in terms of its spectral curve in (\ref{quartic-inst}).
Once the instanton action is known, we can proceed to compute $R_{1,1}(z)$. The equation one obtains at the next order in $g_s$ is
\begin{equation}
{R'_{1,1}(z) \over R_{1,1}(z)}=-{1\over 2} \coth (A'(z)) A''(z),
\end{equation}
which can be immediately integrated as
\begin{equation}
R_{1,1}(z)= \Bigl( \sinh (A'(z))\Bigr)^{-1/ 2}.
\end{equation}
The rest of the coefficients can be found by integrating the resulting equations for $R_{1,n}(z)$, and from (\ref{diffeq}) one finds
the loop expansion of the one-instanton free energy, see \cite{mmnp,asv} for explicit formulae. The results agree with those obtained with the method
of direct calculation in subsection \ref{direct-cal} and with the result obtained from the multicut matrix model in subsection \ref{multi-multi}.
\end{example}
\subsection{Large $N$ instantons, large order behavior and the spectral curve}
\label{inst-spectral}
In the previous section we have discussed various techniques to compute large $N$ instanton effects in one-cut matrix models. The total free energy, including multi-instanton sectors,
is the analogue of the trans-series solution in this situation. We should then expect a connection between the large order behavior of the perturbative genus expansion and the instanton trans-series,
generalizing the connections that we have seen in ODEs and in QM. Let us consider the total free energy
\begin{equation}
{\cal F} (g_s) = g_s^2\, F(g_s),
\end{equation}
which is defined in such a way that the perturbative sector has no negative powers of $g_s$. The one-instanton contribution, or first trans-series, yields an expansion of the form
\begin{equation}\label{curlfa}
{\cal F}^{(1)} (z) = {\rm i} z^{\beta/2} {\rm e}^{-\frac{A}{\sqrt{z}}} F_{1,1}\left( 1+ \sum_{n=1}^{\infty} F_{1,n+1} z^{n/2}\right),
\end{equation}
\noindent
where $z=g_s^2$. This is an important feature distinguishing matrix models and string theory from field theory and QM: the action of an instanton goes like $1/\sqrt{z}$, and not as $1/z$. Similarly, the perturbation series around the instanton sector is a series in powers of $\sqrt{z}$, and not a series in powers of $z$. We may now write
\begin{equation}
{\cal F}^{(0)} (z)=\sum_{g=0}^{\infty} F_g(t)\, z^g.
\end{equation}
Let us assume that there is a ``resurgent" relation between the perturbative sector and the first trans-series. In analogy with the calculation in (\ref{per-np-odes}), we expect
\begin{equation}\label{lostringl}
\begin{aligned}
F_g & = {1\over 2\pi} \int_0^{\infty} {{\rm d} z \over z^{g+1}} z^{\beta/2} {\rm e}^{-\frac{A}{\sqrt{z}}} F_{1,1}\left( 1+ \sum_{n=1}^{\infty} F_{1,n+1} z^{n/2}\right)\\
& \sim {F_{1,1} \over \pi} \left( A^{-2g+\beta} \Gamma(2g-\beta)+ \sum_{n=1}^{\infty} F_{1,n+1} A^{-2g+\beta+n} \Gamma(2g-\beta-n)\right).
\end{aligned}
\end{equation}
This leads to an asymptotic expansion of $F_g$ in powers of $1/g$. Up to two loops we can write it as
\begin{equation}
\label{large-g-beh}
F_g \sim {A^{-2g+\beta} \over \pi}\, \Gamma(2g-\beta)\, F_{1,1} \left[1 + {F_{1,2} A \over 2g} + \cdots \right].
\end{equation}
This gives a prediction for the large-order behavior of genus $g$ free energies in the one-cut matrix model.
In writing (\ref{large-g-beh}) we have implicitly assumed that there is a single instanton solution that contributes to the asymptotic behavior.
In general there might be various instanton configurations in the system, with the same action in absolute value, and in this case $F^{(1)}$
will denote the sum of all these contributions. It is also common to have complex instanton solutions which give
complex conjugate contributions to $F^{(1)}$, as we saw in QM. In this case the asymptotic behavior of $F_g$ is again
obtained by adding their contributions. If we write
\begin{equation}\label{thetas}
A =|A| {\rm e}^{-{\rm i} \theta_A}, \qquad F_{1,1}=|\mu|{\rm e}^{{\rm i} \theta},
\end{equation}
the leading asymptotics will read in this case
\begin{equation}\label{acos}
F_g \sim {|A|^{-2g+\beta} \over \pi}\, \Gamma(2g-\beta)\, |\mu|\, \cos \bigl( (2g-\beta) \theta_A + \theta \bigr).
\end{equation}
We have also assumed so far that the large order behavior is dominated by the instanton associated to eigenvalue tunneling.
However, there is a subtlety concerning the large order behavior and related to the Gaussian part of the free energy. For small $t$, the behavior of the free energy is dominated by
its Gaussian part,
\begin{equation}
F_g(t) \approx {B_{2g} \over 2g (2g-2) t^{2g-2}}, \qquad t \rightarrow 0.
\end{equation}
Using the asymptotics (\ref{bernoulli-as}), we can see that the Gaussian part indeed displays the large $g$ behavior (\ref{large-g-beh}), with
\begin{equation}
\label{triv-inst}
A=-2 \pi t, \qquad \beta=1.
\end{equation}
However, this behavior is due to the universal Gaussian part of the free energy. For other values of $t$, the large order
behavior is indeed controlled by the non-trivial instantons due to eigenvalue tunneling and discussed above.
In the quartic matrix model, for example, there is a non-trivial instanton with action (\ref{quartic-inst}),
and one sees from the explicit calculation in (\ref{muex}) that this instanton has
\begin{equation}
\beta={5\over 2}.
\end{equation}
One can test the conjectural asymptotics (\ref{large-g-beh}) governed by this non-trivial instanton by considering the normalized free energy, i.e.\ the free energy obtained
by subtracting the Gaussian free energy. This was done numerically in \cite{mswone,asv} by looking at the sequence of the first $F_g$, and one can see that (\ref{large-g-beh}) is indeed satisfied.
\FIGURE[ht]{\label{curves}
\includegraphics[height=4cm]{onecut.pdf} \qquad \qquad
\includegraphics[height=4cm]{twocut.pdf}
\caption{The left-hand side shows the spectral curve in the one-cut phase of the cubic matrix model. The instanton action relevant in the double-scaling limit is obtained by
calculating the $B$-period of the one-form $y(x){\rm d} x$, which goes from the filled cut $A_1$ to the pinched point. The two-cut phase, in which the pinched point becomes a filled interval, is shown on the right hand side. The instanton action is still given by the $B$-period integral.}}
The appearance of two different instanton actions, corresponding to the ``trivial" action (\ref{triv-inst}) and the non-trivial instanton action associated to eigenvalue tunneling, has a natural geometric interpretation \cite{dmpnp}. The instanton action describing eigenvalue tunneling in the one-cut phase can be written in terms of the spectral curve $y(x)$ as in (\ref{instdiff}). This can be in turn written as a period integral of the natural meromorphic form $y(x) {\rm d} x$ along a
$B$ cycle which goes from the filled cut to the critical point:
\begin{equation}\label{onecutia}
A_B={1\over 2} \oint_B y(x) {\rm d} x.
\end{equation}
In \figref{curves} (left) we show the pinched curve describing the one-cut curve. The $A_1$ cycle corresponds to the filled cut, and the $B$ cycle goes from $A_1$ to the pinched cycle. The $A_1$-period going around the filled cut is just proportional to the 't Hooft parameter:
\begin{equation}
\label{gaussper}
A_{A_1} (t) =2\pi {\rm i} t ={1\over 2} \oint_{A_1} y(x) {\rm d} x.
\end{equation}
As we have just seen, both the $A_1$ and the $B$ periods give rise to instantons in the matrix model. The instanton corresponding to the $A_1$ period is the ``trivial" one and governs the asymptotics near $t=0$, through the Gaussian part of the potential. In other regions of the $t$-plane, the large genus behavior will be controlled by $B$-periods $A_B(t)$ of the form (\ref{onecutia}). In general, the action controlling the large order behavior at a given point $t$ will be proportional to the smallest period of the meromorphic form $y(x){\rm d} x$ (in absolute value). The B-type periods $A_B(t)$ vanish
at critical values of the 't Hooft parameter, and the $A_1$ period vanishes at $t=0$,
so in both cases the instanton action is given by a ``vanishing cycle."
This result can be generalized to two-cut phases, where the pinched point is now resolved into a second cut $A_2$:
the instanton action is still given by the $B$-cycle integral, now going from the first cycle $A_1$ to the second cycle $A_2$, see \figref{curves} (right).
This instanton action controls the large order behavior of the free energies in the appropriate regions of moduli space. This was verified in \cite{kmr} for the two-cut, cubic matrix model.
This picture of instanton actions as periods of the spectral curve, pointed out in \cite{dmpnp}, seems to be the most general framework explaining the large order structure of a
very general class of matrix models.
For example, in the Chern--Simons matrix model for ${\mathbb S}^3$ introduced in \cite{mm} and briefly reviewed in the Example \ref{cs-example},
the instanton actions are given by \cite{ps}
\begin{equation}
\label{cs-periods}
2 \pi {\rm i} \left( t+ 2\pi {\rm i} n \right), \qquad n \in {\mathbb Z}.
\end{equation}
For $n=0$ one recovers the action governing the Gaussian behavior. The instantons with $n=\pm 1$ can be detected through the large order behavior of the genus $g$ free energies, once the Gaussian part is subtracted \cite{ps}. Since the spectral curve describing this model admits a constant period, one can regard (\ref{cs-periods}) as a linear combination of periods. A more detailed
discussion of this general point of view on instanton actions can be found in \cite{dmpnp}.
\subsection{Classical asymptotics and the Stokes phenomenon in matrix models}
In the previous subsections we have introduced the basic ingredients to study the large $N$ asymptotics of matrix models. Once a background is chosen,
it leads to a formal perturbative series in $g_s$, as in (\ref{largeNas}), and the rest of the sectors in (\ref{sumz}) lead to a formal trans-series.
We would now like to proceed to step two in the program
sketched in the introduction, i.e. we want to solve the problem of classical asymptotics in the
case of matrix models at large $N$, or, equivalently, we want to find the formal expansions which provide the asymptotics in each
region of the complex space of parameters, paying attention to the Stokes phenomenon.
This turns out to be a non-trivial and rich problem. We will discuss some of its aspects by looking at a concrete example: we will consider the matrix model
version of the Airy function. This model is defined by the partition function (\ref{zmm}) where $\gamma=\gamma_1$
is the contour shown in \figref{airycontours} and the potential $V(x)$ is given by (\ref{stokespot}). Without loss of generality,
we will assume that $g_s$ is real and positive, and we will study how this partition function changes as we change $t >0$ (the 't Hooft parameter) and the
parameter $\kappa$ in the potential.
For small $t$, as we discussed in (\ref{limitZ}), the saddle point structure should be the same
as for the Airy function. Therefore, at small $t$ we expect the following phase structure.
For $|\kappa|<2 \pi/3$, the dominant saddle is the one-cut configuration
where all the
eigenvalues sit near $x^{\rm L}=-\zeta^{1/2}$, i.e.
\begin{equation}
(N, 0),
\end{equation}
and
\begin{equation}
Z_{\gamma_1} (N, g_s)\sim Z(N,0).
\end{equation}
The other saddles, where eigenvalues will tunnel to the critical point at $x^{\rm R}$, are not relevant since the integration path does not pass through $x^{\rm R}$.
For $2\pi/3\le |\kappa|<\pi$ the integration path gets deformed into the sum of the two steepest-descent paths, and the other instanton configurations will start contributing by eigenvalue tunneling. However, at least at small $t$ they should be exponentially suppressed. In other words, the
dominant configuration to the matrix integral in this region should still be the boundary saddle $(N,0)$, but there will be exponentially small corrections due to tunneling to the other critical point $x^{\rm R}$, and we will have
%
\begin{equation}
Z_{\gamma_1}(N, g_s)= Z(N,0) \left( 1+ \sum_{\ell=1}^\infty C^\ell Z^{(\ell)}(N)\right),
\end{equation}
%
where $Z^{(\ell)}(N)$ is given by (\ref{npboundary}) and $C$ is an appropriate constant. In contrast to the case of the Airy function, in the large $N$ limit there is an infinite number of corrections
involving the subleading saddle points. The above formula is the analogue of (\ref{stokesints}) for the ``matrix Airy integral." Of course, from the point of view of classical asymptotics,
this infinite series of exponentially small corrections
is not taken into account, but it will be needed if we want to perform Borel resummations. Notice that the exponentially small corrections in (\ref{npboundary})
correspond to the eigenvalue tunneling depicted in \figref{tunnel}, i.e.
\begin{equation}
(N, 0) \rightarrow (N-\ell, \ell).
\end{equation}
Now, a very interesting thing happens when we reach the anti-Stokes line $\kappa=\pi$. The real part of the
action $A(t)$ vanishes at $t=0$, and it is actually negative for small $t$, therefore the subleading corrections are no longer negligible. This is
the familiar ``return of the small exponential" along the anti-Stokes line. There is however a crucial difference with the
case of the Airy function. The reason is that, in the large $N$ limit, there is an {\it infinite} number of corrections that come to life on that line, namely all the sub-leading terms in
(\ref{npboundary}). In fact, the same situation arises in non-linear ODEs, where this leads to the formation of
singularities along anti--Stokes lines, see for example \cite{costincostin}.
It is clear that in order to obtain a reasonable asymptotics we must somehow sum all the corrections. We proceed as follows. It is clear that the saddle $(N,0)$ is no longer a good starting point for the expansion. We have to find a new dominant saddle, which must necessarily have $N^*_2 \not=0$. In order for this saddle to be stable, the effective potentials on both cuts have to be the same. This requires
\begin{equation}
\label{realvanish}
{\rm Re} \, \left({F_0'(N_i^*)\over g_s} \right)=0,
\end{equation}
where the derivative is w.r.t. the variable $s$ defined in (\ref{average}). This condition was first proposed in \cite{jur}, and it is an equipotential condition which
blocks the ``flow" of eigenvalues. A way to derive this condition is to write the sum (\ref{sumz}), in the case $d=2$, as
\begin{equation}
C_1^N \sum_{n=0}^N \left( {C_2 \over C_1} \right)^n {\rm e}^{g_s^2 F_0(n) + \cdots}
\end{equation}
where we denoted $N_1=N-n$, $N_2=n$. Requiring this sum to have a saddle-point for $n^*$ leads to (\ref{realvanish}). In the large $N$ limit, it has been proposed in \cite{bde} to
replace the sum over $n$ by a {\it theta function}. One then gets the following asymptotic expansion in $g_s$,
\begin{equation}
\label{npinterior}
\begin{aligned}
& Z_{\gamma_1}(N, g_s)
= Z(N_1^*, N_2^*) \sum_{k} \sum_{m_i>0}\sum_{g_i>1-{m_i\over 2}} {g_s^{\sum_i (2g_i+m_i-2)}\over k! m_1!\,\dots\, m_k!}\,\,\, F_{g_1}^{(m_1)}\dots F_{g_k}^{(m_k)} \,\, \Theta_{\mu,\nu}^{(\sum_i l_i)}(F'_0/g_s,\tau) \\
&\quad =Z(N_1^*, N_2^*) \biggl\{ \Theta_{\mu,\nu} +g_s \Bigl(\Theta'_{\mu,\nu} F_1' + {1\over 6} \Theta_{\mu,\nu}'''\,F_0'''\Bigr) + {\cal O}(g_s^2) \biggr\}.
\end{aligned}
\end{equation}
The derivatives of the free energies $F_g$ are again w.r.t. $s$.
The theta function $\Theta_{\mu,\nu}$ with characteristics $(\mu,\nu)$ is defined by
\begin{equation}
\label{biget}
\Theta_{\mu,\nu}(u,\tau) = \sum_{n\in {\mathbb Z}} {\rm e}^{(n+\mu-N \epsilon)u}\,\,{\rm e}^{\pi {\rm i} (n +\mu-N \epsilon)\tau (n+\mu-N\epsilon)}\,\,{\rm e}^{2 {\rm i}\pi (n+\mu) \nu},
\end{equation}
and it is evaluated at
\begin{equation}
u={F_0'(N_i^*)\over g_s},
\qquad
\tau = {1\over 2\pi {\rm i}} F_0''.
\end{equation}
In the above equation, we have denoted
\begin{equation}
\epsilon={N_2^* \over N}, \qquad C={C_2 \over C_1}={\rm e}^{2 {\rm i}\pi\nu}.
\end{equation}
In standard matrix models we have $\mu=0$, although the term $N\epsilon$ might give an effective characteristic depending on the parity of $N$ \cite{bde}.
The asymptotics (\ref{npinterior}), discovered in \cite{bde}, was interpreted in in \cite{mpp} as a matrix model generalization of the oscillatory asymptotics along an anti-Stokes line. The singularities along anti--Stokes lines in nonlinear ODEs correspond here to zeros of the partition function, which are made possible due to the presence in (\ref{npinterior}) of the
theta function at leading order: when the theta function vanishes, the partition function vanishes at leading order in $g_s$. These zeros are then nothing but Lee--Yang zeros for the partition function
of the matrix model, which are known to occur along anti--Stokes lines \cite{ipz,ps}.
Notice that the term involving the theta function is
{\it not} analytic in $N$, in the same way that the asymptotics (\ref{oscila}) is not analytic at $x=\infty$ (even when expressed in terms of $z=x^{3/2}$). We have, for the free energy, the expansion
\begin{equation}
F=g_s^{-2} F_0(N_i^*)+ F_1(N_i^*) + \log \Theta_{\mu,\nu} +\cdots,
\end{equation}
so the oscillatory behavior is already present at next-to-leading order. Notice that this asymptotics has contributions which are not present in the standard large $N$ expansion (\ref{largeNas}).
Let us now come back to the example of the
cubic matrix model along $\kappa=\pi$. We have to find a new saddle satisfying (\ref{realvanish}). For ${\rm arg}(\kappa)=\pi$, $\zeta^{1/2}$ is purely imaginary, and it is easy to see from the structure of the genus zero free energy that its {\it real} part is {\it symmetric} in $t_1$, $t_2$. Therefore,
\begin{equation}\label{cubicts}
t_1=t_2
\end{equation}
solves the saddle-point equation
\begin{equation}
{\rm Re}\, {\partial F_0 \over \partial s}=0,
\end{equation}
at least for small $t$. We conclude that, on the anti--Stokes line and for
$t$ small enough, the partition function of the cubic matrix model is given by an expansion of the form (\ref{npinterior}). Since $C_1=C_2=1$ (this follows from
the fact that the path $\gamma_1$ is deformed into a sum of two steepest-descent paths), the characteristics of the theta function are $\mu=\nu=0$.
\begin{figure}[!ht]
\leavevmode
\begin{center}
\includegraphics[height=7cm]{cubicphase2.pdf}
\end{center}
\caption{The phase diagram of the cubic matrix model with potential (\ref{stokespot}), as a function of $2\pi/3\le \kappa\le \pi$ and real $t=g_s N$. The parameter $t$ is also supposed to
be small. The Stokes and anti--Stokes lines of the $N=1$ case, which occur for $\kappa=2\pi/3$ and $\kappa=\pi$, respectively, determine to a large extent the
phase diagram for small $t$. On the anti--Stokes line, the saddle value of $t_2$ is $t/2$, and the classical $1/N$ asymptotics requires corrections beyond the genus expansion. }
\label{cubicphase}
\end{figure}
The above analysis is only valid, strictly speaking, at $t\sim 0$. When $t$ is small but nonzero, the real part of the instanton action vanishes at a finite $t_c(\kappa)$ for each value of $2\pi/3<\kappa<\pi$. Therefore, the transition to a new saddle will occur along a line in the $t-\kappa$ plane defined by
\begin{equation}
{\rm Re}\left(A(t_c(\kappa), \kappa)\right) =0.
\end{equation}
We display the phase diagram of the model in \figref{cubicphase}. For a generic point along the
curve $t_c(\kappa)$ separating the two phases, there is a phase transition in which $N_2^*$ increases smoothly away from zero (this has been verified very explicitly in another, related model in \cite{mpp}). This transition is then of the type ``birth-of-a-cut" studied in for example \cite{eynardbirth}.
Notice that the smooth transition becomes discontinuous as $t\to 0$, i.e.\ it becomes the jump in asymptotics along the anti-Stokes line. For $t>0$, the discontinuity is smoothed out.
We then have a {\it deformed Stokes phenomenon} as we turn on the 't Hooft parameter.
The phase diagram that we have just sketched is
valid for small $t$. For arbitrary $t$ there is a very rich phase structure first described in \cite{david} and
which can be formalized in terms of so-called Boutroux curves, see \cite{bertola}. A recent analysis of the phase structure of the cubic matrix model can be found in \cite{alvarez}.
Based on this detailed example, we can extract some general conclusions for the study of asymptotics and non-perturbative effects in matrix models:
\begin{enumerate}
\item As we change the parameters of the model, the partition function (\ref{sumz}) is dominated by different large $N$ saddle points in different ``phases". There are phase transitions
among these different phases. The transitions occur as in asymptotic analysis: as we move in parameter space, large $N$ saddles which were exponentially suppressed are no longer
so and start contributing to the asymptotics. These transitions, triggered by large $N$ instantons, are called {\it large $N$ phase transitions}, and occur along generalized anti-Stokes lines.
\item The Stokes phenomenon is ``smoothed out" at finite 't Hooft parameter, even in the sense of classical asymptotics.
\item The $1/N$ expansion breaks down along anti-Stokes lines. In the Hermitian case, the classical asymptotics involves
theta functions with a non-analytic dependence on $N$.
\end{enumerate}
A more detailed study of these phenomena, together with an analysis of the so-called lens space matrix model, can be found in \cite{mpp}.
\sectiono{Two applications in String Theory}
It is impossible to provide here a detailed overview of non-perturbative effects in string theory. We will content ourselves with a discussion of
some string theory models where one can use large $N$ matrix models to describe non-perturbative effects.
From the point of view discussed in these lectures, it is useful to focus on string free energies and to regard string theory as a quantum mechanical system with two different Planck constants.
The {\it worldsheet} Planck constant is given by the square of the string length,
\begin{equation}
\hbar_{\rm ws} =\ell_s^2.
\end{equation}
The free energy at genus $g$, $F_g(t)$, depends on a set of target moduli $t$ (this could be a compactification radius or a K\"ahler parameter),
and the perturbative regime of the worldsheet
theory corresponds to the regime in which these moduli are very large as compared to the string length. Non-perturbative effects with respect to
this Planck constant are of the form
\begin{equation}
\exp\left( -A_{\rm ws}(t)/\ell_s^2\right),
\end{equation}
where $A_{\rm ws}(t)$ is a worldsheet instanton action. These effects are usually
realized in string theory as instantons of the non-linear sigma model underlying perturbative string theory.
There
is however a second, {\it spacetime} Planck constant called the string coupling constant,
\begin{equation}
\hbar_{\rm st}=g_s.
\end{equation}
The contribution of a genus $g$ worldsheet to the free energy is weighted by the factor $g_s^{2g-2}$, and the perturbative free energy is given by a genus expansion
of the form
\begin{equation}
F^{(0)}(t,g_s) =\sum_{g=0}^{\infty} F_g(t) g_s^{2g-2}.
\end{equation}
This has the same structure as the large $N$ expansion of a gauge theory or a matrix model, compare to (\ref{largeNas}), and it is believed that,
generically, the genus $g$ free energies grow as $(2g)!$, just as in large $N$ theories \cite{grossperiwal, shenker}.
We should then expect non-perturbative effects w.r.t. this second Planck constant, of the form
\begin{equation}
\label{twop}
\exp\left( -A_{\rm st} (t)/g_s\right)
\end{equation}
where $A_{\rm st} (t)$ is the action of a {\it spacetime} instanton. These effects
were first found experimentally in non-critical string theory \cite{david,shenker}, and later on it
was proposed by Polchinski that they are realized by D-branes \cite{polchinski}.
In general, the calculation of these non-perturbative effects directly in string theory is not easy.
However, when the string theory model has a large $N$ gauge theory dual, one can in principle compute them in the gauge theory.
Worldsheet instantons appear as exponentially small corrections in the 't Hooft expansion at strong 't Hooft coupling,
while spacetime instantons should correspond to large $N$ instantons.
In some cases, the large $N$ dual further reduces to a matrix model, and the techniques described in these
lectures can then be applied to calculate non-perturbative effects in string theory. In particular, the free energy of non-critical strings is described by non-linear ODEs, and non-perturbative effects in these theories can be analyzed with the trans-series formalism presented in section \ref{ODEs}
(see also \cite{taiwan} for recent developments on non-critical strings).
We will now describe two examples where the ideas developed in these lectures lead to non-perturbative results in string theory,
and which go beyond non-critical string theory.
\subsection{A toy model: Trans-series and Hurwitz theory}
Hurwitz theory can be regarded as a toy model for string theory (or, more precisely, for topological string theory). In Hurwitz theory one considers maps from a Riemann surface or string, to another Riemann surface, and ``counts" these maps in an appropriate form, by using Hurwitz numbers. Here, for simplicity, we will assume that the target is ${\mathbb P}^1$, i.e. a two-sphere. The only parameters in Hurwitz theory are the string coupling constant $g_H$ and a target parameter $t_H$ which can be regarded as measuring the ``size" of the target. The partition function of this model is of the form
\begin{equation}
Z(t_H,g_H)=\sum_{g\ge 0} g_H^{2g-2} \sum_{d\ge 0} {H_{g,d}^{{\mathbb P}^1} (1^d) \over (2g-2+2d)!}Q^d,
\end{equation}
where $Q={\rm e}^{-t_H}$. The dependence on $g_H$ is the standard one for a sum over genera in string theory. The quantity
$H_{g,d}^{{\mathbb P}^1} (1^d)$ is a simple Hurwitz number counting degree $d$ covering maps of ${\mathbb P}^1$, with simple branch points only, and by Riemann surfaces
of genus $g$ (see for example \cite{mswone} for an explicit expression for this number in terms of representation theory data). Here, in the partition function, we consider maps from generally disconnected worldsheets. The free energy $F=\log\, Z$ describes connected, simple
Hurwitz numbers $H_{g,d}^{{\mathbb P}^1} (1^d)^{\bullet}$,
\begin{equation}\label{freehurwitz}
F(g_H,t_H) = \sum_{g\ge 0} g_H^{2g-2} F_g(Q),
\end{equation}
where
\begin{equation}
F_g(Q)=\sum_{d\ge 0} {H_{g,d}^{{\mathbb P}^1} (1^d)^{\bullet} \over (2g-2+2d)!}Q^d.
\end{equation}
This theory is in fact a string theory in disguise. It can be realized as a special limit of topological string theory on certain toric Calabi--Yau manifolds,
see for example \cite{bp,italy} for detailed derivations. It was also conjectured in \cite{mm} and proved in \cite{eynardproof} that Hurwitz theory can be described in terms of
matrix integrals, and this in turn was used in \cite{mswone} to compute instanton effects, by using the techniques reviewed in subsection \ref{direct-cal}. As shown in \cite{mmnp} there is however another way to understand non-perturbative effects in this theory, by using difference equations as in subsection \ref{diff-matrix}.
It was proved in \cite{p} that the free energy of Hurwitz theory satisfies a difference equation of the Toda type,
\begin{equation}
\label{toda}
\exp \Bigl( F(t_H+g_H) +F(t_H-g_H)-2 F(t_H)\Bigr)= g_H^2 {\rm e}^{t_H} \partial_{t_H}^2 F(t_H,g_H).
\end{equation}
As we did in subsection \ref{diff-matrix}, we can try to solve this equation with a trans-series ansatz for the free energy of the form (\ref{fullf}). Doing this one immediately obtains the following equation for the one-instanton amplitude,
\begin{equation}
\label{hurwitzone}
\exp \Bigl( \Delta_{g_H} F^{(0)}(t_H)\Bigr) \Delta_{g_H} F^{(1)}(t_H) = g_H^2 {\rm e}^{t_H} \partial_{t_H}^2 F^{(1)},
\end{equation}
where we have written
\begin{equation}
\Delta_{h} f(t)=
f(t+h) +f (t-h)-2 f(t)
\end{equation}
to denote the discrete Laplace operator with step $h$. The first term in the expansion of (\ref{hurwitzone}) in
powers of $g_H$ gives an equation for $A'(t_H)$,
\begin{equation}
\label{hurins}
2\Bigl[ \cosh (A'(t_H)) -1\Bigr]=\exp\left\{ t_H-\partial_{t_H}^2 F^{(0)}_0(t_H) \right\} (A'(t_H))^2.
\end{equation}
It is straightforward to use (\ref{toda})
to derive a one-parameter, trans-series solution for the free energy of the Hurwitz model. In addition, one can check that the function
$A(t_H)$ defined implicitly by (\ref{hurins}) coincides with
the instanton action computed in \cite{mswone} from the matrix model realization of \cite{mm,eynardproof}. This instanton action controls the large genus behavior
of the free energies $F_g(Q)$ of Hurwitz theory. We conclude that, in this toy model of string theory, the perturbative sector obtained by considering Hurwitz coverings can be generalized to a general trans-series. However, the geometric or physical meaning of this trans-series is not yet known.
The Hurwitz model that we have discussed can be generalized to a family of topological string theories on non-compact Calabi--Yau manifolds
(the so-called ``local curves") where non-perturbative effects can be also computed by using matrix model techniques, see \cite{mmopen,mswone,mmlocal}.
\subsection{Stringy instantons in ABJM theory}
We will now describe an application in physical superstring theory, namely ABJM theory \cite{abjm} and its type IIA dual.
ABJM theory \cite{abjm,abjmreview} is a Chern--Simons--matter theory in
three dimensions with gauge group $U(N)_k \times U(N)_{-k}$ and ${\cal N}=6$ supersymmetry. It is a conformally invariant theory.
The Chern--Simons actions for the gauge groups are of the form (\ref{csact}) and they
have couplings $k$ and $-k$, respectively. The theory contains as well four hypermultiplets $C_I$, $I=1, \cdots, 4$,
in the bifundamental representation of the gauge group. The 't Hooft parameter of this theory is defined as
\begin{equation}
\lambda={N\over k}.
\end{equation}
In \cite{kwy} it was shown, through a beautiful application of localization
techniques, that the partition function of ABJM theory on the three-sphere can be computed by a matrix model (see \cite{mmcslectures} for a pedagogical review). This matrix model is given by
\begin{equation}
\label{kapmm}
Z_{\rm ABJM}(N, g_s)={1\over N!^2} \int \prod_{i=1}^{N}{ {\rm d} \mu_i {\rm d} \nu_j \over (2 \pi)^2} {\prod_{i<j} \sinh^2 \left( {\mu_i -\mu_j \over 2}\right) \sinh^2 \left( {\nu_i -\nu_j \over 2}\right) \over
\prod_{i,j} \cosh^2 \left( {\mu_i -\nu_j \over 2}\right)} {\rm e}^{-{1\over 2g_s}\left( \sum_i \mu_i^2 -\sum_j \nu_j^2\right)},
\end{equation}
where the coupling $g_s$ is related to the Chern--Simons coupling $k$ of ABJM theory as
\begin{equation}
g_s={2 \pi {\rm i} \over k}.
\end{equation}
The reduction of the partition function of the theory to a matrix integral makes it possible to address some of the questions considered in these lectures in a very concrete way. For example, one can wonder, as we did in subsection \ref{nonpert-cs} for Chern--Simons theory, what is the large order behavior of perturbation theory for the partition function or the free energy. For fixed $N=2$, it has been shown in \cite{russo} that the perturbative expansion in powers of $g_s$ grows factorially but it is Borel summable. A different, more complicated question concerns the behavior of the free energy in the large $N$ expansion, i.e.: what is the behavior of the genus $g$ free energies? What are the non-perturbative effects at large $N$? These issues were addressed in \cite{dmp,dmpnp}, which we now review.
The above matrix integral turns out to be a close cousin of the matrix integral for CS theory mentioned in (\ref{betain}). Although in principle it is not of the form (\ref{zmm}), it can be put in such a form by an appropriate change of variables. Moreover, all the notions introduced in the previous section to analyze the large $N$ limit
of conventional matrix models --such as resolvent, spectral curve, and the like-- can be generalized to the ABJM matrix model.
It turns out that, at large $N$, the ABJM matrix model is a two-cut matrix model. In terms of the variables
\begin{equation}
Y={\rm e}^y,\;X={\rm e}^x,
\end{equation}
the spectral curve is given by the equation \cite{akmv,hy,mp,dmp}
\begin{equation}
\label{abjmcurve}
Y+\frac{X^2}{Y}-X^2+{\rm i}\kappa\,X-1=0\,.
\end{equation}
The Riemann surface of (\ref{abjmcurve}) can be represented by two $X$-planes glued along the cuts $[1/a,a]$ and $[-b,-1/b]$. The position of the endpoints can be determined from
\begin{equation}
a+\frac{1}{a}+b+\frac{1}{b}=4,\qquad
a+\frac{1}{a}-b-\frac{1}{b}=2{\rm i}\kappa\,.
\end{equation}
The variable $\kappa$ can be regarded as the complex modulus of the spectral curve.
As in (\ref{tper2}), the 't Hooft parameter can be obtained as a period of the spectral curve and
it is related to $\kappa$ by \cite{mp}
\begin{equation}
\label{lamkap}
\lambda(\kappa)={\kappa \over 8 \pi} {~}_3F_2\left(\frac{1}{2},\frac{1}{2},\frac{1}{2};1,\frac{3}{2};-\frac{\kappa^2
}{16}\right).
\end{equation}
The free energy of the ABJM matrix model has a $1/N$ expansion
of the form
\begin{equation}
\label{fgs}
F(\lambda,g_s)=\sum_{g=0}^{\infty} g_s^{2g-2} F_g(\lambda).
\end{equation}
The genus zero free energy can be obtained from (\ref{dfy}) as \cite{dmp,mmcslectures}
\begin{equation}
\label{comf}
\partial_\lambda F_0={\kappa \over 4} G^{2,3}_{3,3} \left( \begin{array}{ccc} {1\over 2}, & {1\over 2},& {1\over 2} \\ 0, & 0,&-{1\over 2} \end{array} \biggl| -{\kappa^2\over 16}\right)+{ \pi^2 {\rm i} \kappa \over 2}
{~}_3F_2\left(\frac{1}{2},\frac{1}{2},\frac{1}{2};1,\frac{3}{2};-\frac{\kappa^2
}{16}\right),
\end{equation}
where $G^{2,3}_{3,3}$ is a Meijer function. Near $\lambda=0$ this planar free energy has a logarithmic singularity, but this is due to the Gaussian
part of the free energy, and once this part is subtracted (as in (\ref{g-sub}), with $d=2$ since there are two cuts) we obtain an analytic function at $\lambda=0$, as expected. There are singularities in the complex plane of the 't Hooft parameter, signaling the finite radius of convergence of the series. The location of the singularities can be found explicitly from the above expressions. Indeed, the singularities correspond to the branch point of the hypergeometric functions involved, at
\begin{equation}
\kappa =\pm 4 {\rm i},
\end{equation}
which correspond to
\begin{equation}
\label{critpoint}
\lambda=\mp{2 {\rm i} K \over \pi^2},
\end{equation}
where $K$ is Catalan's constant. For reasons explained in \cite{dmp}, these points are called the {\it conifold} points.
In \cite{dmp} it was also shown that, for $g\ge 1$, the free energies can be written in terms of quasi-modular forms of the
modular parameter of the elliptic curve (\ref{abjmcurve}).
\begin{equation}
\label{tauex}
\tau={\rm i} {K'\left({{\rm i} \kappa \over 4}\right)\over K \left({{\rm i} \kappa \over 4}\right)}.
\end{equation}
For $g=1$, one simply has
\begin{equation}
F_1=-\log \, \eta (\tau),
\end{equation}
where $\eta$ is the usual Dedekind eta function. For $g\ge 2$, the $F_g$ can be written in terms of the quasi-modular forms $E_2$ (the standard Eisenstein series), $b$ and $d$, where
\begin{equation}
b=\vartheta_2^4(\tau), \qquad d=\vartheta_4^4(\tau),
\end{equation}
are standard Jacobi theta functions. More precisely, we have the general structure
\begin{equation}
F_g(\lambda)={1\over \left( b d^2 \right)^{g-1}} \sum_{k=0}^{3g-3}
E_2^{k}(\tau) p^{(g)}_k(b,d), \qquad g\ge 2,
\end{equation}
where $p^{(g)}_k(b,d)$ are polynomials in $b, d$ of modular weight $6g-6-2k$. In \cite{dmp} a recursive procedure was described which gives
all the genus $g$ free energies unambiguously. The genus $g$ free energies $F_g(\lambda)$ obtained in this way are exact interpolating functions of the 't Hooft parameter, and they can be studied in various regimes. When $\lambda\rightarrow 0$ they reproduce the perturbation theory of the matrix model (\ref{kapmm}) around the Gaussian point $\lambda=0$, where they behave, as expected, as two copies of the Gaussian matrix model,
\begin{equation}
F_g(\lambda)=- {B_{2g} \over g (2g-2)} (2 \pi {\rm i} \lambda)^{2-2g}+{\cal O}(\lambda).
\end{equation}
The genus $g$ free energies can also be studied in the strong coupling regime $\lambda \rightarrow \infty$, or, equivalently, at $\kappa \rightarrow \infty$. In this regime it is more convenient to use the shifted variable
\begin{equation}
\label{hatl}
\hat \lambda =\lambda -{1\over 24}={\log ^2\kappa\over 2 \pi^2}+{\cal O}( \kappa^{-2}), \qquad \kappa \gg 1.
\end{equation}
One finds the following structure. For $F_0$ and $F_1$ one has, at strong coupling,
\begin{equation}
\begin{aligned}
F_0&={4 \pi^3 {\sqrt{2}} \over 3} \hat \lambda^{3/2} + {\zeta(3) \over 2} + {\cal O}\left({\rm e}^{-2 \pi {\sqrt{2 \hat \lambda}}}\right), \\
F_1&={1\over 6} \log \kappa -{1\over 2} \log\left[ {2 \log \kappa \over \pi} \right]+ {\cal O}\left( {1\over \kappa^2} \right).
\end{aligned}
\end{equation}
For $g\ge 2$ one has
\begin{equation}
\label{fgl}
F_g=c_g+ f_g\left( {1\over \log \, \kappa} \right)+ {\cal O}\left( {1\over \kappa^2} \right),
\end{equation}
where
\begin{equation}
c_g=- {4^{g-1} |B_{2g} B_{2g-2}| \over g (2g-2) (2g-2)!}
\end{equation}
can be interpreted as the contribution from constant maps to the free energy, and
\begin{equation}
f_g(x)=\sum_{j=0}^g c_j^{(g)}x^{2g-3+j}
\end{equation}
is a polynomial\footnote{The interpolating functions $F_g$ computed in \cite{dmp} did not include the constant map contribution. This was corrected in \cite{hanada}.}. The leading, strong coupling behavior is then given by
\begin{equation}
\label{leadingfg}
F_g(\lambda)-c_g\sim \lambda^{{3\over 2}-g}, \qquad \lambda \to \infty, \quad g\ge2.
\end{equation}
We can now ask what is the large order behavior of $F_g(\lambda)$ and what are the possible instanton configurations underlying this behavior.
First of all, we notice that the $\lambda$-independent part $c_g$ of $F_g$ has the large genus behavior,
\begin{equation}
\label{loconstant}
c_g \sim -(2\pi^2)^{-2g} \Gamma(2g-1),
\end{equation}
and corresponds to a constant instanton action governing this constant map contribution \cite{ps},
\begin{equation}
A_m=2 \pi^2.
\end{equation}
In order to identify other instanton actions contributing to the asymptotics, one can look at vanishing periods of the meromorphic form $y(x) {\rm d} x$,
as suggested by the discussion in subsection \ref{inst-spectral}. There are two obvious vanishing periods. The first is the ``Gaussian'' period, which vanishes at $\lambda=0$,
\begin{equation}
\label{wca}
A_{w}(\kappa)=-4 \pi^2 \, \lambda (\kappa).
\end{equation}
There is another period which vanishes at the conifold point $\kappa=\pm 4 {\rm i}$, given by
\begin{equation}
\label{wiac}
\begin{aligned}
A_{c}(\kappa)&= {{\rm i} \over \pi} {\partial F^{(w)}_0 \over \partial \lambda} +4 \pi^2 \lambda \pm \pi^2 \\
&=\frac{{\rm i} \kappa}{4\pi} G^{2,3}_{3,3} \left(\left.
\begin{array}{ccc}
\frac{1}{2}, & \frac{1}{2},& \frac{1}{2} \\
0, & 0,&-\frac{1}{2}
\end{array} \right| -\frac{\kappa^2}{16}\right)\pm \pi^2.
\end{aligned}
\end{equation}
Finally, there is a linear combination of both instanton actions,
\begin{equation}
\label{sca}
A_{s}(\kappa)=A_{w}(\kappa)+A_{c}(\kappa).
\end{equation}
It was shown in \cite{dmpnp} that these three instanton actions control the large order behavior of $F_g(\lambda)-c_g$ in different regions of the complex plane of the
't Hooft parameter. Near the Gaussian or weakly coupled point, it is (\ref{wca}) which controls the large order behavior. Near the conifold point (\ref{critpoint}), it is (\ref{wiac}) which dominates. Finally, in the strong coupling region $\lambda \gg 1$, the large order behavior is controlled by (\ref{sca}). Notice that we can regard the behavior of (\ref{loconstant}) as controlled by a constant period, which always exists in these models.
We can now discuss Borel summability. In the physical ABJM theory, $\lambda$ is real and $g_s$ is purely imaginary. The expansion (\ref{fgs}) should be written in terms of the real coupling constant
$2\pi/k$, i.e. as
\begin{equation}
F(\lambda, k)=\sum_{g=0}^{\infty} \left( {2\pi \over k}\right)^{g-2} (-1)^{g-1} F_g(\lambda).
\end{equation}
We get an extra $(-1)^{g-1}$ sign at each genus.
Equivalently, this leads to an extra $-{\rm i}$ factor in the instanton actions computed above. We can now ask whether this factorially divergent
series is Borel summable or not. At strong coupling $\lambda\gg 1$, the behavior of the genus $g$ free energy $(-1)^{g-1} F_g(\lambda)$
is dominated by the constant period $-{\rm i} A_m=-2 {\rm i} \pi^2$, which is purely imaginary. We then obtain a Borel summable series. Even after subtracting the constant map contribution, we obtain a Borel-summable series at strong coupling. Indeed, the strong coupling action (\ref{sca}), which controls the asymptotics in this regime, is complex:
%
\begin{equation}
{\rm Im}\left( -{\rm i} A_s(\lambda)\right) =\pi^2,
\end{equation}
%
and for large $\lambda$ we have,
\begin{equation}
\label{iaction}
-{\rm i} A_s(\lambda)=2 \pi^2 {\sqrt{2 \lambda}} + \pi^2 {\rm i} + {\cal O}\left({\rm e}^{-2 \pi {\sqrt{2\lambda}}}\right), \qquad \lambda \gg 1.
\end{equation}
Interestingly, it was shown in \cite{dmpnp} that the first, leading term of this action at strong coupling coincides with the action of a D2-brane wrapping ${\mathbb R}{\mathbb P}^3$ inside ${\mathbb C} {\mathbb P}^3$. This seems to indicate that this instanton of the matrix model corresponds indeed to a D2-brane in the dual type IIA string theory, as expected from general arguments in superstring theory \cite{polchinski}.
\sectiono{Concluding remarks}
In these lectures I have tried to provide a pedagogical introduction to some aspects of non-perturbative effects in quantum theory. I have
focused on topics which are not covered in detail in the classic reviews on the subject, like large $N$ gauge theories and matrix models, with a view towards
applications to string theory. It is clear that, in many respects, this topic is still in its infancy. Even in the context of large $N$ matrix models, where many explicit results are available, there are still some important open problems. For example, there are no detailed results on the large order asymptotics for the genus $g$ free energies in multi-cut phases, i.e.
there is no analogue of equation (\ref{lostringl}) for these phases; only the instanton action $A$ has been identified in some cases, as discussed in subsection \ref{inst-spectral}.
The ideas of resurgence, which have been very powerful in the context
of ODEs and Quantum Mechanics, have scarcely been used in QFT (see however \cite{au} for recent work in this direction). In the case of string theory,
these ideas have been explored in \cite{mmnp,asv}, but mostly in the case of non-critical strings and toy models with large $N$ duals, and much remains to be done. I only hope that these lectures
will be useful for future research along these directions.
\section*{Acknowledgments}
A first version of these lectures was presented in the research program ``Topological String Theory, Modularity and Non-Perturbative Physics"
at the Erwin Schr\"odinger Institute in Vienna, in the summer of 2010. I would like to thank the organizers, and particularly Albrecht Klemm, for the invitation to
give these lectures. I would also like to thank all my collaborators in the work described here: Nadav Drukker,
Bertrand Eynard, Stavros Garoufalidis, Alexander Its,
Andrei Kapaev, Albrecht Klemm, Sara Pasquetti, Pavel Putrov,
Marco Rauch, Ricardo Schiappa, and Marlene Weiss. Special thanks to
Ricardo Schiappa, who was kind enough to read the whole set of lectures and made
many invaluable comments. This work is supported in part by the
Fonds National Suisse, subsidies 200020-126817 and
200020-137523.
|
{
"timestamp": "2014-02-03T02:05:11",
"yymm": "1206",
"arxiv_id": "1206.6272",
"language": "en",
"url": "https://arxiv.org/abs/1206.6272"
}
|
\section{Introduction}
According to numerous microscopic calculations
(e.g., \citealt*{yls99, ls01} and references therein),
nucleons and hyperons
in the internal layers of neutron stars (NSs) can become superfluid
at temperatures $T \la 10^8 \div10^{10}$~K.
Superfluidity has a strong impact on the thermal evolution
of NSs, their oscillations, and (most probably) leads to
such observational phenomena
as glitches (\citealt*{ai75})
and pulsar spin precession (\citealt*{shaham77, lc02}).
Recent real-time observations (\citealt*{hh10})
of a cooling NS in the Cassiopeia A supernova remnant
give strong arguments
that the star has a superfluid core (\citealt{syhhp11,ppls11}).
The aim of this short note is to point out
the importance
of one effect related to superfluidity of baryons in NSs
that has usually been ignored in the NS literature.
In Sec.\ II we outline the effect.
In Sec.\ III we demonstrate its efficiency.
In Sec.\ IV we discuss possible
consequences for the physics of NSs
and in Sec.\ V we conclude.
We use the system of units
in which $k_{\rm B}=\hbar=1$.
\section{A simple problem and the proposed effect}
Let us consider a degenerate Fermi-liquid
composed of identical particles of mass $m$.
Assume that they interact through a weakly attractive potential so that
BCS theory (see, e.g., \citealt*{lp80}) is applicable.
Assume also that they pair
(become superfluid)
in the spin-singlet $^1S_0$ state
at temperatures $T$ below some critical temperature $T_{\rm c}$.
The role of elementary excitations in such superfluid Fermi-liquid
is played by Bogoliubov excitations (see, e.g., \citealt*{feynman72}).
In what follows, all equations will be
written in a reference frame in which
the mean (hydrodynamic) velocity ${\pmb V}_{\rm q}$
of Bogoliubov excitations vanishes, ${\pmb V}_{\rm q}=0$
(i.e., normal liquid component is at rest).
In the absence of superfluid current
(when the superfluid velocity
${\pmb V}_{\rm s}=0$)
the energy $E_{\pmb p}$ of a Bogoliubov excitation
with momentum ${\pmb p}$
near the Fermi surface can be written as
\begin{equation}
E_{\pmb p}=\sqrt{v_{\rm F}^2 (|{\pmb p}|-p_{\rm F})^2 + \Delta^2},
\label{disp1}
\end{equation}
where $v_{\rm F}$ and $p_{\rm F}$
are the Fermi-velocity and Fermi-momentum, respectively;
and $\Delta$ is the energy gap,
given by the standard equation (\citealt{lp80}),
\begin{equation}
1 = - V_0 \sum_{{\pmb p}} \frac{1- 2 f_{{\pmb p}}}{2 E_{{\pmb p}}},
\label{gap1}
\end{equation}
where $V_0$ is the (constant) pairing potential and
\begin{equation}
f_{\pmb p}=\frac{1}{{\rm e}^{E_{\pmb p}/T}+1}
\label{fp}
\end{equation}
is the Fermi-Dirac distribution function for Bogoliubov excitations.
If, however, the superfluid current
is present (${\pmb V}_{\rm s}\neq 0$)
then fermions pair with momenta $(-{\pmb p}+{\pmb Q}, {\pmb p}+{\pmb Q})$
rather than with $(-{\pmb p}, {\pmb p})$,
and the total momentum of a Cooper pair is
\begin{equation}
2 \, {\pmb Q}= 2 \, m {\pmb V}_{\rm s}.
\label{Q}
\end{equation}
What will be the equation for the gap?
The answer can be found in \cite{bardeen62}
and is well known in the physics of superconductors.
Now, instead of Eq.\ (\ref{gap1}), one should write
\begin{equation}
1 = - V_0 \sum_{\pmb p}
\frac{1-\mathcal{F}_{{\pmb p}+{\pmb Q}}-\mathcal{F}_{-{\pmb p}+{\pmb Q}}}{2 E_{\pmb p}}.
\label{gap2}
\end{equation}
Here $\mathcal{F}_{{\pmb p}+{\pmb Q}}$
is the distribution function for Bogoliubov
excitations with momentum $({\pmb p}+{\pmb Q})$
in the system with non-zero ${\pmb V}_{\rm s}$,
\begin{equation}
\mathcal{F}_{{\pmb p}+{\pmb Q}}= \frac{1}{{\rm e}^{\mathfrak{E}_{{\pmb p}+{\pmb Q}}/T}+1},
\label{Fp2}
\end{equation}
where
\begin{equation}
\mathfrak{E}_{{\pmb p}+{\pmb Q}} \approx \frac{{\pmb p} {\pmb Q}}{m} + E_{\pmb p}
\label{disp2}
\end{equation}
is the energy of a Bogoliubov excitation with momentum $({\pmb p}+{\pmb Q})$.
In Eq.\ (\ref{disp2}) we assumed $Q \ll p_{\rm F}$
which is true in all interesting cases
(see, e.g., \citealt*{gh05,gkh09a,gkh09b}).
Eq.\ (\ref{gap2}) can be written in terms of the quantity $\Delta_0$,
which is the energy gap at $T=0$ and ${\pmb Q}=0$.
It satisfies Eq.\ (\ref{gap1}) with $f_{\pmb p}=0$.
Using it, one can present Eq.\ (\ref{gap2}) in the form
\begin{equation}
\frac{p_{\rm F} m}{\pi^2} \,\, {\rm ln}\left(\frac{\Delta_0}{\Delta}\right) =
\sum_{\pmb p} \frac{\mathcal{F}_{{\pmb p}+{\pmb Q}}+\mathcal{F}_{-{\pmb p}+{\pmb Q}}}{E_{\pmb p}}.
\label{gap3}
\end{equation}
The solution to this equation gives the gap $\Delta$
as a function of $T$ and $Q=|{\pmb Q}|$.
First consider two limiting cases
in which $\Delta(T, \, Q)$ vanishes.
($i$) if ${Q}=0$ then $\Delta=0$ at
\begin{equation}
T=T_{\rm c}\approx 0.567 \Delta_0 \quad \mbox{(the well-known BCS result)};
\label{Tc}
\end{equation}
($ii$) if $T=0$ then $\Delta=0$ at
\begin{equation}
Q \equiv Q_{\rm cr \, 0}= \frac{\rm e}{2} \, \,\frac{\Delta_0 \, m}{p_{\rm F}}.
\label{Qcr}
\end{equation}
The latter result is less known but can be found, e.g., in \cite{alexandrov03}.
Notice that the well-known Landau criterion
for superfluidity breaking
gives $Q_{\rm cr \, 0}^{({\rm Landau})} =\Delta_0\, m/p_{\rm F}$
and is not accurate for
a superfluid Fermi-liquid.
\begin{figure}
\begin{center}
\leavevmode
\epsfxsize=2.0in \epsfbox{1.eps}
\end{center}
\caption{The energy gap $\Delta$ (in units of $\Delta_0$)
versus $Q=m V_{\rm s}$ [in units of $Q_{\rm cr \, 0}$, see Eq.\ (\ref{Qcr})]
for a set of temperatures
$T/T_{\rm c}=0.1$, $0.4$, $0.6$, $0.75$, $0.85$, and $0.95$.
}
\label{Fig:gap_V}
\end{figure}
Some numerical solutions to Eq.\ (\ref{gap3})
are presented in Figs.\ 1 and 2.
Fig.\ 1 shows the gap $\Delta(T, \, Q)$ [in units of $\Delta_0$]
versus momentum $Q$ [in units of $Q_{\rm cr \, 0}$]
for a set of temperatures $T/T_{\rm c}=0.1$, $0.4$, $0.6$,
$0.75$, $0.85$, and $0.95$.
One sees that $\Delta$ is quite sensitive to variation
of $Q = m \, V_{\rm s}$
as long as $T \ga 0.1 T_{\rm c}$.
Another important conclusion that can be drawn
from Fig.\ 1
is that (for a given $T$)
the maximum critical momentum $Q_{\rm cr}$
strongly depends on temperature.
Fig.\ 2 illustrates this point more clearly.
In the left panel we plot $Q_{\rm cr}$ (in units of $Q_{\rm cr \, 0}$)
versus $T$ (in units of $T_{\rm c}$).
The right panel shows the same dependence $Q_{\rm cr}(T)$ but
with $Q_{\rm cr}$ measured in units of
\begin{equation}
Q_{\rm cr}^{({\rm app})}(T) \equiv \frac{{\rm e}}{2} \,\, \frac{\Delta(T,\, 0)\, m}{p_{\rm F}}.
\label{Qcrapp}
\end{equation}
We see that
$Q_{\rm cr}$ changes with $T$ in such a way that
$Q_{\rm cr}(T)/Q_{\rm cr}^{({\rm app})}(T)$ is roughly constant.
Therefore,
the energy gap $\Delta$ can be a strong function of the momentum
${\pmb Q}=\, m\, {\pmb V}_{\rm s}$ or, in an arbitrary frame,
a strong function of the difference
$m \, ({\pmb V}_{\rm s}-{\pmb V}_{\rm q}) \equiv m \, \Delta {\pmb V}$.
We will refer to this effect as the
`$\Delta {\pmb V}$-effect'.
\begin{figure}
\begin{center}
\leavevmode
\epsfxsize=3.3in \epsfbox{2.eps}
\end{center}
\caption{Left panel: $Q_{\rm cr}$ (in units of $Q_{\rm cr \, 0}$)
versus $T$ (in units of $T_{\rm c}$).
Right panel: The same as in the left panel but
$Q_{\rm cr}$ is in units of $Q_{\rm cr}^{({\rm app})}$
[see Eq.\ (\ref{Qcrapp})].
}
\label{Fig:Qcr_T}
\end{figure}
The critical value $\Delta V_{\rm cr}(T)$
of $\Delta V = |{\pmb V}_{\rm s}-{\pmb V}_{\rm q}|$,
at which superfluidity dies out,
is easily estimated
by taking
$Q_{\rm cr} \sim Q_{\rm cr}^{\rm (app)}$.
Then, from Eq.\ (\ref{Qcrapp}), we obtain
\begin{equation}
\Delta V_{\rm cr}(T) \sim 10^7 \left[\frac{\Delta(T, \, 0)}{10^9 \, {\rm K}}\right] \,
\left(\frac{n_0}{n} \right)^{1/3} \,\,\, {\rm cm} \,\, {\rm s}^{-1},
\label{VsVq}
\end{equation}
where $\Delta(T,\, 0)$ is measured in Kelvins;
$n_0=0.16$~fm$^{-3}$ is the nucleon density in atomic nuclei;
$n=p_{\rm F}^3/(3 \pi^2)$ is the particle number density.
\section{Importance of the $\Delta {\pmb V}$-effect
for neutron stars}
If the difference $\Delta V$ between
the baryon superfluid velocities and a normal velocity
is comparable to $\Delta V_{\rm cr}$,
then the baryon energy gaps
can be substantially reduced.
A few interesting consequences
of this `dynamical reduction' of the gaps
are discussed in the next section.
Here we illustrate possible importance of
the $\Delta {\pmb V}$-effect
by considering radial oscillations
of a nonrotating superfluid NS
whose core
is composed of neutrons, protons, and electrons.
The main question
is at what oscillation amplitude
$\Delta V$ becomes
comparable to $\Delta V_{\rm cr}$?
For simplicity we
($i$) assume that
neutrons pair in the spin-singlet ($^1S_0$) state
[rather than in the triplet ($^3P_2$) state]
and ($ii$) neglect the Landau quasiparticle interaction between quasinucleons
when calculating $\Delta_{\rm n}(T,\, {\pmb V}_{{\rm s}\rm n} -{\pmb V}_{\rm q})$
[here and below the subscripts $\rm n$, $\rm p$, and $\rm e$
refer to neutrons, protons, and electrons,
respectively]\footnote{
Let us remark that
to calculate $\Delta_{\rm n}$ and $\Delta_{\rm p}$ as functions of
$({\pmb V}_{{\rm s}\rm n} -{\pmb V}_{\rm q})$ and
$({\pmb V}_{{\rm s}\rm p} -{\pmb V}_{\rm q})$
{\it with} allowance for interactions between quasiparticles,
one should follow the derivation of \cite{gh05}.
Namely, one should self-consistently solve
equations (31) and (32) of that reference without
making assumptions (34)--(37),
which are valid only for small
relative velocities between the superfluid and normal components.
As a result of this calculation,
one will find that, generally,
$\Delta_{\rm n}$ (and $\Delta_{\rm p}$) is a function
of {\it both} $({\pmb V}_{{\rm s}\rm n} -{\pmb V}_{\rm q})$
and
$({\pmb V}_{{\rm s}\rm p} -{\pmb V}_{\rm q})$.
This interesting property is a consequence of entrainment between baryons
of different species;
we will discuss it in more detail elsewhere.
}.
The NS model used here and all the microphysics input are essentially
the same as in \cite*{kg11};
we refer the reader to that work for more details.
In particular, we consider
the star of gravitational mass $M=1.4 M_{\odot}$,
circumferential radius $R=12.2$~km,
central density $\rho_{\rm c}=9.26 \times 10^{14}$~g~cm$^{-3}$,
and adopt the APR EOS in the NS core (\citealt*{apr98}).
The model of nucleon superfluidity employed here
coincides with the model 3 of \cite{kg11}
and is shown in Fig.\ 3.
The left panel of Fig.\ 3 presents nucleon critical temperatures
$T_{{\rm c}\rm n}$ and $T_{{\rm c}\rm p}$
versus density $\rho$ in the NS core,
the right panel demonstrates
the red-shifted critical temperatures
$T_{{\rm c}\rm n}^{\infty}\equiv T_{{\rm c}\rm n} \, {\rm e}^{\nu/2}$
and $T_{{\rm c}\rm p}^{\infty}\equiv T_{{\rm c}\rm p} \, {\rm e}^{\nu/2}$
($\nu$ is the metric function)
versus radial stellar coordinate $r$
(in units of $R$).
The redshifted proton critical temperature
is taken to be constant
$T_{\rm cp}^\infty = 5 \times 10^9$~K;
the redshifted neutron critical temperature
varies with $r$ and has maximum
$T_{{\rm c n} \, {\rm max}}^\infty = 6 \times 10^8$~K
in the stellar centre.
In the right panel of Fig.\ 3
we hatch the region occupied by the neutron superfluidity
at a redshifted stellar temperature
$T^\infty \equiv T \, {\rm e}^{\nu/2} =4 \times 10^8$~K.
\begin{figure}
\begin{center}
\leavevmode
\epsfxsize=3.3in \epsfbox{3.eps}
\end{center}
\caption{(color online) Left panel: Nucleon critical temperatures $T_{{\rm c}k}$
($k={\rm n,p}$) versus density $\rho$.
Right panel: Redshifted critical temperatures
$T_{{\rm c}k}^\infty$
versus radial coordinate $r$. See text for details.
}
\label{Fig:Tc}
\end{figure}
To model oscillations of superfluid NSs one has to use
the hydrodynamics of mixtures of superfluid Fermi-liquids
(\citealt*{ab75,ac07,ga06,gusakov07}).
The important parameter of such hydrodynamics
is the so called entrainment matrix $\rho_{ik}$
(\citealt*{ab75,bjk96,gh05}),
or relativistic entrainment matrix $Y_{ik}$
(\citealt{gkh09a,gkh09b}).
Both these matrices are very temperature-dependent (\citealt{gh05,gkh09b}).
As a consequence, the eigenfrequencies and eigenfunctions
of oscillating superfluid NS
also depend on temperature
(\citealt{ga06,kg11}, \citealt*{cg11})
\footnote{In this paper we use the standard (textbook) version
of superfluid hydrodynamics in which the independent velocity fields are
${\pmb V}_{{\rm s}\rm n}$, ${\pmb V}_{{\rm s}\rm p}$, and ${\pmb V}_{\rm q}$.
Notice, however, that in the NS literature an equivalent form
of superfluid hydrodynamics is often used which follows from the
convective variational principle formulated by Carter
and analyzed, in the nonrelativistic framework,
by \cite{prix04}.
In this hydrodynamics (and in the context of npe-matter)
the independent velocity fields
are ${\pmb v}_{l} \equiv {\pmb J}_l/\rho_l$,
where ${\pmb J}_l$ and $\rho_l$
are, respectively,
the mass current density
and density for particle species
$l={\rm n}$, p, and e.
These velocities are related with
${\pmb V}_{{\rm s}\rm n}$,
${\pmb V}_{{\rm s}\rm p}$,
and ${\pmb V}_{\rm q}$
by the following equations ($i=$n, p):
$\rho_{i}{\pmb v}_{i}= \sum_{k={\rm n},
\, {\rm p}} \rho_{ik} {\pmb V}_{{\rm s} k}
+ (\rho_{i}-\sum_{k={\rm n},
\, {\rm p}} \rho_{ik}) {\pmb V}_{\rm q}$;
${\pmb v}_{{\rm e}}={\pmb V}_{\rm q}$.
See \cite{prix04} for more details.
In terms of velocities
${\pmb v}_{\rm n}$, ${\pmb v}_{\rm p}$, and ${\pmb v}_{\rm e}$
the difference
$\Delta {\pmb V}_{\rm n}= {\pmb V}_{\rm sn}- {\pmb V}_{\rm q}$
equals:
$\Delta {\pmb V}_{\rm n}=
[\rho_{\rm n} \rho_{\rm pp} ({\pmb v}_{\rm n}-{\pmb v}_{\rm e})
-\rho_{\rm p}\rho_{\rm np}({\pmb v}_{\rm p}-{\pmb v}_{\rm e})]
/(\rho_{\rm nn}\rho_{\rm pp}-\rho_{\rm np}^2)$.
}.
Below we consider the {\it first}
radial oscillation mode of a superfluid NS
(see \citealt{kg11}, particularly figure 3 there).
\begin{figure}
\begin{center}
\leavevmode
\epsfxsize=3.3in \epsfbox{4.eps}
\end{center}
\caption{(color online) Amplitudes of the eigenfunctions $\Delta V_{\rm n}$
(solid lines)
and the critical velocities $\Delta V_{\rm cr}$ (dashes)
versus $r/R$ for the four temperatures
$T^\infty=3.0 \times 10^7$~K (black lines),
$8.0 \times 10^7$~K (red lines), $2.0 \times 10^8$~K (blue lines),
and $5.0 \times 10^8$~K (violet lines).
To plot $\Delta V_{\rm n}$ we assumed
that the energy of oscillations is $E_{\rm mech}=10^{47}$~erg.
The vertical dotted lines show the (temperature-dependent)
boundaries between the inner superfluid and the outer normal regions.
See text for details.
}
\label{Fig:dV_r}
\end{figure}
Figure 4 shows the amplitude of the eigenfunction
$\Delta V_{\rm n} \equiv |{\pmb V}_{{\rm s}\rm n} -{\pmb V}_{\rm q}|$
and the critical velocity
$\Delta V_{\rm cr}$
as functions of $r$
(solid and dashed lines, respectively; both in units of $10^7$~cm~s$^{-1}$)
\footnote{We stress that {\it both} velocities
${\pmb V}_{{\rm s}\rm n}$ and ${\pmb V}_{\rm q}$
are calculated self-consistently using
the finite temperature superfluid hydrodynamics.}.
We plot $\Delta V_{\rm n}$ and $\Delta V_{\rm cr}$
for four redshifted stellar temperatures:
$T^\infty=3.0\times 10^7$~K (black lines), $8.0 \times 10^7$~K (red lines),
$2.0 \times 10^8$~K (blue lines), and $5.0 \times 10^8$~K (violet lines).
The oscillation frequencies $\omega$ of the first radial mode
for such temperatures are
$\omega/(10^4 \, {\rm s^{-1}})\approx 1.702$, $1.702$,
$1.064$, and $0.516$, respectively.
The vertical dotted lines in Fig.\ 4 indicate (temperature-dependent)
boundaries between the neutron superfluid region
and the outer normal region with nonsuperfluid neutrons.
In the normal region
the functions $\Delta V_{\rm n}$ and $\Delta V_{\rm cr}$ are not defined.
The oscillation energy
of the star is $E_{\rm mech}=10^{47}$~erg.
For a {\it nonsuperfluid} NS
this energy corresponds to an oscillation amplitude
\begin{equation}
\varepsilon \equiv \lim_{r \rightarrow 0} \, \frac{\xi(r)}{r}
\approx 4.4 \times 10^{-4},
\label{ampl}
\end{equation}
where $\xi(r)$ is the Lagrangian displacement (\citealt*{gyg05}).
It follows from Fig.\ 4
that $\Delta V_{\rm n}$ can substantially exceed
the critical values $\Delta V_{\rm cr}$,
so that superfluidity is destroyed
by oscillations in the large part of the stellar core
(see, in particular, the violet and blue curves).
This means that the $\Delta {\pmb V}$-effect
can greatly influence (or even drive) the dynamics of NSs
already at rather modest oscillation amplitudes.
\begin{figure}
\begin{center}
\leavevmode
\epsfxsize=2.5in \epsfbox{5.eps}
\end{center}
\caption{(color online)
Neutron energy gap $\Delta(T, \, \Delta V_{\rm n})$
(in units of $10^9$~K)
versus $r/R$ for two temperatures $T^\infty = 8 \times 10^7$~K (upper panel)
and $T^\infty=2 \times 10^8$~K (bottom panel) and some oscillation
energies $E_{\rm mech}$ (indicated in the figure).
Vertical dotted lines show $r$ at which neutron superfluidity disappears ($\Delta_{\rm n}=0$).
The larger $E_{\rm mech}$, the smaller the superfluid region.
See text for details.
}
\label{Fig:Gap_r}
\end{figure}
This point is additionally illustrated in Fig.\ 5,
where we plot the neutron energy gap
$\Delta_{\rm n}(T, \, \Delta V_{\rm n})$ versus $r/R$
for two temperatures,
$T^\infty = 8.0 \times 10^7$~K (upper panel) and
$T^\infty =2.0 \times 10^8$~K (bottom panel),
and a set of oscillation energies $E_{\rm mech}$.
In the upper panel
$\Delta_{\rm n}(T, \, \Delta V_{\rm n})$
is shown for $E_{\rm mech} = 0,\, 5.0\times 10^{47},\, 10^{48}$, and $5\times 10^{48}$~erg;
in the bottom panel
$\Delta_{\rm n}(T, \, \Delta V_{\rm n})$
is shown for $E_{\rm mech} = 0,\, 10^{46},\, 10^{47}$, and $10^{48}$~erg.
The oscillation amplitudes $\varepsilon$
[given by Eq.\ (\ref{ampl})]
for these oscillation energies
are presented in Table~\ref{tab}.
Notice that in each panel of Fig.\ 5 the curves are plotted using
the eigenfunctions $\Delta V_{\rm n}(r)$,
which differ from one another only by normalization
(by the value of $E_{\rm mech}$).
For $E_{\rm mech}=10^{47}$~erg these eigenfunctions
have already been presented in Fig.\ 4
(see the red and blue solid lines;
the red line corresponds to
$T^\infty = 8.0 \times 10^7$~K,
the blue line -- to $T^\infty = 2.0 \times 10^8$~K).
If $E_{\rm mech} = 0$
(no oscillations; see the solid lines in both panels of Fig.\ 5)
the gap $\Delta_{\rm n}$ is unaffected by $\Delta V_{\rm n}$
and is entirely determined
by the dependence of $T_{\rm cn}$ on $r$ (see Fig.\ 3).
The vertical dotted lines in Fig.\ 5 indicate boundaries between
the inner superfluid and the outer normal regions;
these boundaries depend on $E_{\rm mech}$.
Obviously, the higher $E_{\rm mech}$,
the larger $\Delta V_{\rm n}(r)$,
and, correspondingly,
the smaller the superfluid region and $\Delta_{\rm n}$.
One sees that the gaps are very sensitive
to variation of $\Delta V_{\rm n}$.
\begin{table}
\caption{Oscillation (mechanical) energy $E_{\rm mech}$ and the corresponding amplitude
of oscillations $\varepsilon$, defined by Eq.~(\ref{ampl}).
}
\begin{center}
\begin{tabular}{|l|c|c|c|c|c|c|c|}
\hline
$E_{\rm mech}/(10^{47} \, {\rm erg})$ & $0.0$ & $0.1$ & $0.5$ & $1$ & $5.0$ & $10.0$ & $50.0$ \\
\hline
$\varepsilon/10^{-4}$ & $0.0$ & $1.4$ & $3.1$ & $4.4$ & $9.7$ & $14$ & $31$\\ \hline
\end{tabular}
\label{tab}
\end{center}
\end{table}
\section{Possible applications}
As follows from the consideration of the previous section,
the $\Delta {\pmb V}$-effect can operate
at not too small oscillation amplitudes.
All interesting consequences of this effect
are related to the reduction of baryon gaps.
Let us list some of them:
(1) The reduction of the gaps
influences the entrainment matrix $\rho_{ik}$ (\citealt{gh05}),
which depends on them.
As a result,
$\rho_{ik}$ will become a non-linear function
of the oscillation amplitude.
This will
($i$) make the oscillation equations nonlinear
and hence
($ii$) affect the eigenfrequencies and eigenfunctions of oscillating NS.
Moreover, this will
($iii$) influence the dissipation processes,
because bulk viscosity terms
explicitly depend on $\rho_{ik}$.
In a rotating star the decrease
of the element $\rho_{\rm np}$ of the entrainment matrix
will, in addition,
($iv$) reduce the mutual friction force,
which is proportional to $\rho_{\rm np}$ (\citealt*{als84}).
We emphasize that the dependence of $\rho_{\rm np}$ on $T$
and on $\Delta V_{\rm n}$ and $\Delta V_{\rm p}$
is a very important effect for mutual friction and related phenomena,
which has been neglected in the literature.
How to calculate the entrainment matrix $\rho_{ik}$
taking into account the $\Delta{\pmb V}$-effect?
A direct calculation is difficult
(but one can perform it in a manner similar
to how it was done in \citealt{gh05}).
A good approximation for $\rho_{ik}$
could be to calculate it from the formula (49) of \cite{gh05}
making use of the velocity-dependent gaps from Sec.\ II
instead of the gaps
$\Delta_{\rm n}(T, \, 0)$ and $\Delta_{\rm p}(T, \, 0)$.
In this way one would obtain, for instance, for $\rho_{\rm np}$
\begin{equation}
\rho_{\rm np} = \frac{p_{\rm F n}^{3/2} p_{\rm F p}^{3/2}}{9 \,\pi^2 \, S}
\, \frac{m_{\rm n}m_{\rm p}}{\sqrt{m_{\rm n}^\ast m_{\rm p}^\ast}}\,
F_1^{\rm np} \,(1-\Phi_{\rm n})\,(1-\Phi_{\rm p}),
\label{rhonp}
\end{equation}
where $S = ( 1 + F_1^{\rm nn} \, \Phi_{\rm n}/3) \,
( 1 + F_1^{\rm pp} \, \Phi_{\rm p}/3 )
- ( F_1^{\rm np}/3 )^2 \, \Phi_{\rm n} \Phi_{\rm p}$;
$m_i$, $m_{i}^\ast$, $p_{{\rm F}i}$, and
$F_1^{ik}$
are the mass of a free particle,
Landau effective mass, Fermi momentum and
the dimensionless Landau parameters, respectively
($i,\, k={\rm n},\, {\rm p}$).
Further, $\Phi_i$ is a simple function of $x_i\equiv \Delta_i(T, \, \Delta V_{i})/T$,
specified in \cite{gh05},
which changes from 0 at $T=0$ to 1 at $\Delta_i(T, \Delta V_{i})=0$.
One sees from Eq.\ (\ref{rhonp}) that $\rho_{\rm np}$ vanishes
whenever $\Delta_{\rm n}(T, \, \Delta V_{\rm n})=0$ (and hence $\Phi_{\rm n}$=1)
or $\Delta_{\rm p}(T, \, \Delta V_{\rm p})=0$ (and hence $\Phi_{\rm p}=1$).
(2) Another important consequence of the $\Delta{\pmb V}$-effect
is its impact on kinetic coefficients of NS matter,
in particular, on the bulk and shear viscosities.
($i$){\it Bulk viscosity}.
There are four bulk viscosity coefficients in the $\rm npe$-matter
of NSs (\citealt{gusakov07}).
All of them are generated by nonequilibrium beta-processes
(direct or modified URCA reactions)
and depend on the difference $\Delta \Gamma$
between the direct and inverse reaction rates.
$\Delta \Gamma$ is generally a complicated function of $T$, $\Delta_{\rm n}$, $\Delta_{\rm p}$,
and of the imbalance of chemical potentials
$\delta \mu \equiv \mu_{\rm n}-\mu_{\rm p}-\mu_{\rm e}$ (\citealt*{hly00,hly01}),
where $\mu_i$ is the chemical potential for particle species $i=\rm n$, $\rm p$, $\rm e$.
Recently it has been shown by \cite*{ars12}
that if $\delta \mu > {\rm max}\{\Delta_{\rm n}, \, \Delta_{\rm p}\}$
then, even for $T\ll \Delta_{\rm n}$ and/or $\Delta_{\rm p}$,
the bulk viscosity is {\it not} suppressed by the nucleon superfluidity
and can be very efficient.
It seems that the $\Delta{\pmb V}$-effect of the reduction
of the energy gaps $\Delta_{\rm n}$ and $\Delta_{\rm p}$
by relative motion of superfluid and normal component
is {\it complementary} to the effect considered in \cite{ars12}.
Both effects act in unison to increase the bulk viscosity coefficients,
and they are of comparable strength.
Notice, however, that the effect of \cite{ars12}
can only affect the bulk viscosity coefficients,
while the applicability range of the $\Delta{\pmb V}$-effect is wider;
it directly influences the baryon energy gaps and thus all dynamics of NSs.
($ii$) {\it Shear viscosity}.
Neglecting entrainment between baryon species ($\rho_{\rm np}=0$),
the shear viscosity $\eta$ can be calculated
in the same fashion as was done,
e.g., in \cite*{sy08}
[the results will be the same].
The only difference is that one should use the velocity-dependent
gaps $\Delta_i(T, \, \Delta V_i)$
instead of $\Delta_i(T,\, 0)$ in all equations
[$i=\rm n$, $\rm p$].
It is interesting that the $\Delta {\pmb V}$-effect can both
increase or decrease the shear viscosity.
For example, the electron shear viscosity $\eta_{\rm e}$ decreases
with increasing $\Delta V_{\rm p}$
(that is, with reducing $\Delta_{\rm p}$),
because electrons are better screened by protons
when $\Delta_{\rm p}$ is large (\citealt{sy08}).
On the other hand,
the neutron shear viscosity $\eta_{\rm n}$
can either decrease or increase
with growing $\Delta V_{\rm n}$ and $\Delta V_{\rm p}$.
The behaviour of $\eta_{\rm n}$
in that case is determined by the competition of two effects:
by the increase of the normal density
of neutron Bogoliubov excitations $\rho_{\rm qn}$
and by the reduction of the neutron mean free path $\lambda$
due to more frequent collisions with neutron and proton Bogoliubov excitations
(note that $\eta_{\rm n}$ can be estimated as
$\eta_{\rm n} \sim \rho_{\rm q n} \, v_{\rm Fn} \, \lambda$,
where $v_{\rm Fn}$ is the neutron Fermi-velocity).
Similar effects were carefully analyzed in \cite*{bhy01}
in application to the neutron thermal conductivity.
An entrainment between neutrons and protons
will strongly modify the derivation of the
neutron shear viscosity,
even neglecting the $\Delta {\pmb V}$-effect.
The main difference will be the equilibrium
Fermi-Dirac distribution function
for neutron Bogoliubov excitations in a system
with superfluid currents.
This function was first obtained in \cite{gh05}
[see equation (28) there];
it is very different from the standard expression,
valid when $\rho_{\rm np}=0$.
To our best knowledge,
a derivation of $\eta_{\rm n}$ in a system {\it with} entrainment
has not been attempted in the literature.
(3) Finally, there is a number of important consequences of the fact
that the relative velocity $\Delta {\pmb V}$
between the superfluid and normal liquid components
cannot be too large in a {\it stationary} rotating NS.
Here we present two of them.
($i$) It is generally accepted that neutron vortices are pinned
to atomic nuclei in the NS crust
(or to magnetic flux tubes in the NS core).
At a certain critical $\Delta {\pmb V}$
they can unpin from the nuclei
(or from magnetic flux tubes).
However,
in some models (e.g., \citealt*{link09})
pinning is so strong that
the critical relative velocity can be
as high as $10^6 \div 10^7$~cm~s$^{-1}$.
These values are close to
$\Delta V_{\rm cr}$ [see Eq.\ (\ref{VsVq})].
Thus,
the $\Delta {\pmb V}$-effect
can be very important for such models.
It can also play a role in explanation
of the long-period precession of isolated pulsars (\citealt{link03})
\footnote{We thank the anonymous referee
for pointing out to us this possibility.}.
($ii$) In \cite*{acp04} and \cite{slac10}
a two-stream instability
in homogeneous superfluid matter
is discussed,
that can be triggered once the relative velocity
$\Delta {\pmb V}$ reaches some critical value.
According to these authors,
the critical value is of the order of the sound speeds,
i.e., it is {\it much greater}
than the typical $\Delta V_{\rm cr}$,
at which superfluidity completely disappears
[see Eq.\ (\ref{VsVq})].
In other words,
it is not very probable
that this instability is realized in NSs.
Notice, however,
that under certain circumstances
similar instability in rotating NSs
can drive the so called inertial modes
unstable at a much lower $\Delta {\pmb V}$
(\citealt*{pca04}).
\section{Conclusion}
The baryon energy gaps depend
on the relative velocity between the superfluid
and normal components ($\Delta {\pmb V}$-effect).
We propose, for the first time,
that this effect may have a strong impact
on the dynamical properties of NSs.
We illustrate this point by considering
radial oscillations of an NS
with superfluid nucleon core
and a nonsuperfluid crust.
However, we stress that the $\Delta {\pmb V}$-effect
should be equally important in the crust of NSs
where superfluid neutrons are present,
as well as in the interiors of hyperon and quark stars.
Although we discussed some immediate applications in Sec.\ IV,
it is clear that
more efforts are needed to analyze all possible
consequences of this effect
on the evolution of NSs.
\section*{Acknowledgments}
The authors are very grateful to D.G. Yakovlev
for valuable comments and encouragement.
This research was supported by
Ministry of Education and Science of Russian Federation
(Contract No. 11.G34.31.0001
with SPbSPU and leading scientist G.G. Pavlov,
and Agreement No. 8409, 2012),
RFBR (grants 11-02-00253-a and 12-02-31270-mol-a),
FASI (grant NSh-4035.2012.2),
and by RF president programme (grant MK-857.2012.2).
|
{
"timestamp": "2012-11-06T02:06:07",
"yymm": "1206",
"arxiv_id": "1206.6580",
"language": "en",
"url": "https://arxiv.org/abs/1206.6580"
}
|
\section{Introduction}
The membrane of a eukaryotic cell is mainly composed of a lipid bilayer,
which is impermeable to water solvated ions \cite{Alberts:2007:MBC}.
Ion channels are nanopores formed by transmembrane proteins; they
allow ions to flow through and act as biological valves
connecting the intracellular with extracellular domains.
Ion channels are the main mechanism by which cells control the
intracellular concentration of chemical species, as well as the
potential gradient across the membrane.
As such they play important roles in maintaining various functions
of plant, animal and human cells.
There are two main features which distinguish ion channels from other
nanoscale porous media. Firstly, they may be
selective, distinguishing between the charge and size
of ions; for example, the potassium K$^+$
channel conducts potassium ions at a rate $10^4$ times faster than
it does sodium ions \cite{Doyle:1998:SPC}.
Secondly, their conformations may change between open and closed states
in response to an external stimulus such as a voltage gradient,
ligand binding, or pH value.
The molecular structure of many ion channels has been revealed by
X-ray crystallography in recent decades, which provides insight into
their features and function.
For example, the potassium K$^+$ channel is composed of four identical
subunits which create a {\em cavity} connecting the cell interior to a
{\em selectivity filter} at the outer end of the pore \cite{Doyle:1998:SPC}.
The narrow selectivity filter is only $12$ \AA \, long
and about $3$ \AA\, wide, which forces potassium ions with Pauling
radius $1.33$ \AA \,to shed their hydrating waters to enter and pass
in a single-file fashion. The oxygen atoms of four carbonyl
groups form four rings around the selectivity filter, which generate
local minima called {\em binding sites} in the overall energy
landscape to coordinate the dehydrated ions.
Mathematical models for ion channels include molecular dynamics
(MD), Brownian dynamics (BD) and continuum theory
(Poisson-Nernst-Planck equations) in descending order
of resolution \cite{Cooper:1985:TIT,Cooper:1988:DTD,Levitt:1999:MIC}. Molecular dynamics
provides the most detailed description by mimicking the motions and
interactions of all atoms (from membrane proteins to free ions and
even individual water
molecules) at the molecular level
\cite{Berneche:2001:EIC,Pongprayoon:2009:SAT,Jensen:2010:PCH}. Since the relaxation
of water molecules happens at
the fastest timescale of $1$ fs, the time step of an MD simulation has to
be very small, and one needs to evolve a system of thousands of
particles for times of the order of $0.1$~ms to observe ion conduction.
Such a simulation is obviously computationally intensive,
but much shorter simulations (of the order of $10$ ps)
can be used to obtain information about the local potential energy and the
effective diffusion coefficient of ions, which can then be fed into BD
simulations.
Brownian dynamics
\cite{vanGunsteren:1982:ABD,Corry:2000:TCT,Moy:2000:TCT,Cheng:2007:MFG} is a more
coarse-grained simulation which replaces the solvent molecules (water)
with a continuum, and represents their influence by a dielectric constant
and stochastic forcing.
The fluctuations of membrane proteins are ignored and the channel is
approximated by a solid boundary. Because the dynamics of water and
proteins are no longer
included, a relatively long time step can be used, which greatly
reduces the computational cost. In this paper, we focus on this level
of resolution, and introduce a discrete rate theory that is based on
observations from BD.
The continuum model
\cite{Chen:1997:POC,Schuss:2001:DPN,Nadler:2004:IDC} calculates the potential
energy by a mean-field approximation of average ion positions, which
yields a Poisson equation,
and then formulates a Boltzmann equation (in equilibrium) or
a Nernst-Planck equation (in non-equilibrium) for the ion
concentration. These continuum partial differential equations (PDEs)
can be solved efficiently; however
the individual ion-ion interaction is missing in this mean-field
assumption which then fails to predict some properties
(e.g. saturation). Comparisons of BD and continuum theories in different
channel configurations are presented in \cite{Corry:2000:TCT,Moy:2000:TCT}.
Recently several hybrid models combining MD and the theory of stochastic
processes have been proposed,
which are able to include molecular details and access long time scales
while keeping computational cost low.
One idea is to apply the Eyring rate theory to the ion permeation process
using the potential of mean force (PMF) calculated using MD.
This is based on the assumption that channels have some binding sites,
and ions pass through by a hopping mechanism: an ion
fluctuates around a certain site
before it obtains enough energy to overcome the energy barrier and
hops into the adjacent vacant site.
This ion hopping mechanism has been revealed by MD in channels with
binding sites \cite{Berneche:2001:EIC,Berneche:2003:MVI,Jensen:2010:PCH}.
In addition, the single file diffusion constraint imposed by the
narrowness of the channel assures that ions cannot cross each
other in the channel.
Therefore, the continuous dynamics of ion diffusion can be represented
by transitions between discrete Markovian states.
The Eyring rate theory was originally designed for chemical reactions
in the 1930s, with transition rates proportional to the
exponential of the energy barrier and distance between binding sites
\cite{Eyring:1935:ACA} (as shown in \cite{Cooper:1985:TIT}
this overestimates the physical barrier in the ion-crossing process).
A novel theory was proposed recently in \cite{Abad:2009:NRT} for a
one-dimensional channel with sawtooth-like PMF,
in which the transition rates are not approximated using the energy
barrier but are obtained as the product of total
escape rate from one binding site and the splitting probability
determining the relative chance of landing in each neighbouring site.
\cite{Abad:2009:NRT} showed that an optimal size of binding site maximizes
the ionic flux if the applied voltage exceeds a threshold. They
assume the channel is occupied by at most one ion,
whereby the resulting system forms a single Markov chain,
and the rates can be solved explicitly.
In the multi-ion channel considered here, ion-ion interactions as well
as the higher
dimension of the energy landscape mean that the complexity of the
rate theory is greatly increased.
In this paper, we present a general discrete rate theory for a multi-ion
channel, and compare it with BD.
The ion permeation process involves ion hopping, ion escaping and ion
entering.
For the purposes of this work
we assume ion entry rates are known and focus on calculating
the other rates in terms of the mean escape time and splitting
probability.
Because of the complicated network between states the rates are more
intricately related to these quantities than in the single ion case.
Moreover, since analytical solutions for the mean
escape time and splitting probability are not available,
these must be determined by solving the corresponding PDEs
numerically.
The theory is illustrated by a two-ion channel with one binding site
and two ion sources. We show that, as with the one-ion channel, there
exists an optimal shape for the external potential that allows
a maximal flux.
The structure of this paper is as follows. In Section~\ref{sec:BD},
we introduce a general theory for a multi-ion channel with a maximal
capacity of $N$ ions. We first present BD simulations
and formulate an equivalent cascade of hierarchical Fokker-Planck
equations for the probability distribution of ions. An illustrative
example of a $2$-ion channel is
discussed and the probability distribution from the histogram of BD
and the solution of the Fokker-Planck equation are compared.
Next, a discrete rate theory framework is presented in
Section~\ref{sec:rate} and the transition rates calculated. The
$2$-ion channel is revisited in this
framework, and the result is compared with that from BD.
In Section~\ref{sec:IV}, we apply
the theory to study the dependence of channel conduction on different
parameters such as the diffusion coefficient, ion entry rate and depth of
potential wells. In particular, we study the effect of the geometry of
the external
potential in Section~\ref{sec:geometry}. We conclude by
discussing the advantages and limitations of this method and possible
applications and extensions in Section~\ref{sec:conclusion}.
\section{Brownian dynamics} \label{sec:BD}
In this section, we present the theoretical framework of BD simulation.
Since we are interested in studying the ion permeation process, which
occurs on a
timescale of $10^{-7}$ s, and since conformational changes occur on
a timescale of $10^{-3}$ s, we assume that the channel is always open and
does not change its
conformation.
Since the channel is very narrow and the ions pass through in single
file \cite{Jensen:2010:PCH}, we will suppose that the motion is
one-dimensional, that is, the centres of
the ions will be constrained to lie along a line. The generalisation
to a fully three dimensional channel is algebraically complicated but
conceptually straightforward.
Since ions cannot pass each other in one dimension, we may neglect the
finite size of the ions and model them as point particles with
charge.
We define the maximal capacity of a channel to be $N$, so that it can
hold up to $N$ ions at one time.
We denote the number of binding sites in the channel by $M$.
The parameters $N$ and $M$ vary among different channels; for example,
the gramicidin A channel has two
binding sites ($M=2$) and single-ion occupation dominates (so that
$N=1$, or perhaps $N=2$ to allow for a knock-on effect)
\cite{Abad:2009:NRT,Procopio:1979:ITF}.
\begin{figure}[t]
\centering
{
\begin{picture}(0,0)%
\includegraphics{Figure2_1.pdf}%
\end{picture}%
\setlength{\unitlength}{4144sp}%
\begingroup\makeatletter\ifx\SetFigFont\undefined%
\gdef\SetFigFont#1#2#3#4#5{%
\reset@font\fontsize{#1}{#2pt}%
\fontfamily{#3}\fontseries{#4}\fontshape{#5}%
\selectfont}%
\fi\endgroup%
\begin{picture}(5043,2056)(886,-2291)
\put(2095,-1727){\makebox(0,0)[lb]{\smash{{\SetFigFont{7}{8.4}{\rmdefault}{\mddefault}{\updefault}{\color[rgb]{0,0,0}$x=-L$}%
}}}}
\put(4483,-1727){\makebox(0,0)[lb]{\smash{{\SetFigFont{7}{8.4}{\rmdefault}{\mddefault}{\updefault}{\color[rgb]{0,0,0}$x=L$}%
}}}}
\put(1034,-1196){\makebox(0,0)[lb]{\smash{{\SetFigFont{7}{8.4}{\rmdefault}{\mddefault}{\updefault}{\color[rgb]{0,0,0}${\cal I}$}%
}}}}
\put(5280,-1196){\makebox(0,0)[lb]{\smash{{\SetFigFont{7}{8.4}{\rmdefault}{\mddefault}{\updefault}{\color[rgb]{0,0,0}${\cal E}$}%
}}}}
\end{picture}%
}
\caption{A schematic structure of a channel. $x=-L$ and $x=L$
are the (artificial) left and right boundaries connecting
large reservoirs of electrolyte in and outside the cell
respectively. $\mathcal{I}$ and $\mathcal{E}$ represent the overall
intracellular and extracellular environments, respectively. }
\label{fig:channel}
\end{figure}
At the ends of the channel the pore opens out into the
intracellular and extracellular space.
A full model would include (probably continuum) models of these
spaces, which would then be joined (preferably matched in terms of
matched asymptotic expansions, but more likely patched)
to the channel model. For our present
purposes we need
to introduce (artificial) interfaces (i.e. points) at the left and
right ends of the
channel such that an ion passing through these interfaces is taken to
have left the channel and passed into the external domains.
Without loss of generality, we suppose that the left interface
connecting the channel to the intracellular domain lies at $x=-L$, and
the right interface connecting the channel to the
extracellular domain lies at $x=L$, as shown in Fig. \ref{fig:channel}.
Thus an absorbing
boundary condition is imposed at $x=-L$ and $x=L$.
In BD simulation of an ion channel the contribution of water molecules
to the motion of a solute ion can be approximated by random
collisions and an average frictional force in the evolution equation of
the solute ion \cite{vanGunsteren:1982:ABD,Moy:2000:TCT}.
The motion of a system of $k$ ions is given by the Langevin equation
\begin{equation}
\label{fullLangevin}
m_i \, dv_i = - \gamma\,v_i \mbox{d} t +
f^k_i(x_1, \ldots,x_k) \mathrm{d}t + \gamma
\sqrt{2 \,D}\, dW_i, \quad i=1, \ldots, k,
\end{equation}
where $x_i(t)$ and $v_i(t)$ are the location and
velocity of the $i^{\mbox{\scriptsize th}}$ ion respectively.
There are three forces on the right hand side of
(\ref{fullLangevin}). The first term
corresponds to the frictional force exerted on the ion by averaging
the
effect of water molecules; $\gamma$ is the frictional drag
coefficient, which depends on the surrounding fluid environment. Here
we assume
it to be uniform so that $\gamma$ is constant. The third term is the
stochastic force generated by the random collisions of water molecules;
$W_i$ is a Wiener process and
$D=k_B T/\gamma$ is the diffusion coefficient, where
$k_B$ is the Boltzmann constant and $T$ is the temperature.
The second term $f^k_i(x_1, \ldots, {x}_k)$ is the overall
electric force on the $i^{\mbox{\scriptsize th}}$ ion,
including interactions with all other $k-1$ ions in the channel,
fixed charges in the protein, and external field across the
membrane. It depends on the locations of all ions, and can be obtained
(along with the diffusion coefficient)
from MD simulation.
Note that a typical value of the diffusion coefficient in aqueous
solutions at room temperature is $D \sim 10^{-3} \mbox{mm}^2 \mbox{s}^{-1}$,
so that the ratio $m_i/\gamma \sim 10^{-14} \mbox{s}$. Since we
usually take a time step
$\Delta t > 10^{-12} \mbox{s}$ in the simulation, the system is in an
overdamped limit \cite{Cooper:1985:TIT}.
We may thus approximate (\ref{fullLangevin}) by the overdamped
Langevin equation
\begin{equation}
\label{Langevin}
{dx}_i = \frac{\,D}{\,k_B T } {f}^k_i({x}_1,
\ldots, {x}_k)\, \mathrm{d}t
+ \sqrt{2 D }\, {dW}_i, \quad i=1, \ldots, k.
\end{equation}
The boundary conditions on (\ref{Langevin}) may be described as
\begin{enumerate}
\item When the number of ions in the channel $k$ is less than its
capacity $N$, new ions are generated at the left (respectively
right) end
at a rate $H_k$ (respectively $G_k$). In principle $H_k$ and $G_k$
depend on the current locations of the $k$ ions in the
channel ${x}_1, \ldots, {x}_k$ as well as the
intracellular and extracellular environments
$\mathcal{I}$ and $\mathcal{E}$.
Since we are in the overdamped limit we cannot simply place the
incoming ions at the ends of the channel: under Brownian motion they
would immediately cross the boundary and leave the channel
again. Instead we place them at a position within the channel given by
the positional distribution function $h({x}; {x}_1, \ldots, {x}_k)$
(or respectively
$g({x}; {x}_1, \ldots, {x}_k)$). Note that $h$ and $g$ also depend on
the positions of the existing ions. This is necessary since, for
example, an ion entering the channel from the left must lie to the
left of $x_1$, while an ion entering from the right must lie to the
right of $x_k$. Thus, at the very least, $h$ depends on $x_1$ while
$g$ depends on $x_k$.
The functions $h$ and $g$ should be chosen to make the
join with the outer model as smooth as possible, as in
\cite{Flegg:2012:TRM,Franz:2012:PGS}.
Here we simply assume that $h$ and $g$, and
the rates $H_k$ and $G_k$, are given.
\item When ${x}_i(t)<-L$ or ${x}_i(t)>L$ the $i^{\mbox{\scriptsize th}}$
ion is removed from the channel.
\item If ${x}_i(t)> x_{i+1}(t)$ for some $i$ then $x_i$ and $x_{i+1}$
are switched. This enforces the single-file nature of the channel by
preventing an ion overtaking its neighbour.
This condition is unlikely to occur with ions in
a channel due to the strong Coulomb repulsion, but may be necessary
if we are interested in neutral molecules.
\end{enumerate}
\subsection{Hierarchical Fokker-Planck equations}
We denote by
$P_k({x}_1, \ldots, {x}_k, t)$ the probability density
function for the event that there are $k$ ions in the channel at positions
${x}_1, \ldots, {x}_k$ at time $t$. Since the number of
ions in the channel may run from zero to the channel capacity $N$, we
have $N+1$ such probability density functions.
The probability of no ion in the channel (i.e. $k=0$)
is denoted by $P_0(t)$, and is independent of the spatial variable.
We label the ions by the order of their locations, such that
${x}_i < {x}_j$
for $i <j$. Then the stochastic process
\eqref{Langevin} is equivalent to the following hierarchical system of
Fokker-Planck equations:
\begin{figure}[t]
\centering
{
\includegraphics[width=5.04in]{Figure2_2}
}
\caption{Hierarchical Fokker-Planck Equations describe the conservation of
ions in the channel. For $k$-ion occupancy, the transitions to and from
$(k-1)$-ion and $(k+1)$-ion occupancy by ions entering and escaping
are demonstrated, along with internal transitions between states.}
\label{fig:FP}
\end{figure}
\begin{subequations}
\begin{eqnarray}
\partial_t\, P_k({x}_1, \ldots, {x}_k,t) &=& D \,\nabla
\cdot \Big( \nabla P_k({x}_1, \ldots, {x}_k,t)
- P_k({x}_1, \ldots, {x}_k,t) \, \frac{\,1}{\,k_B T}
{\mathbf F}_k({x}_1, \ldots, {x}_k,t)
\Big)\nonumber \\
&&\mbox{}- \Big(H_k({x}_1, \ldots, {x}_k)
+ G_k({x}_1, \ldots, {x}_{k})
\Big) P_k({x}_1, \ldots, {x}_k, t) \nonumber \\
&&\mbox{}+ H_{k-1}({x}_2, \ldots,
{x}_{k})\,h({x}_1;x_2,\ldots,x_{k})P_{k-1}({x}_2, \ldots,
{x}_{k}, t) \nonumber \\
&& \mbox{} + G_{k-1}({x}_1, \ldots,
{x}_{k-1})\, g({x}_k;x_1,\ldots,x_{k-1})
P_{k-1}({x}_1, \ldots, {x}_{k-1}, t)\nonumber \\
&&
\mbox{}+ T_{k+1}( {x}_1, \ldots, {x}_{k}) +
R_{k+1}( {x}_1, \ldots, {x}_{k}) , \label{FP}
\end{eqnarray}
where $\nabla=(\partial_{{x}_1},
\ldots, \partial_{{x}_k})$, ${\mathbf F}_k=({f}^k_1,
\ldots, {f}^k_k) \in \mathbb{R}^{k}$, and
\begin{eqnarray*}
T_{k+1}( {x}_2, \ldots, {x}_{k+1}) &=& D \Big(
\frac{\partial P_{k+1}}{\partial x_1} -
P_{k+1} \, \frac{\,1}{k_B T} {f}^{k+1}_1 \Big)(-L,x_2,\ldots,x_{k+1}) , \\
R_{k+1}( {x}_1, \ldots, {x}_{k})& =& -D \Big( \frac{\partial
P_{k+1}}{\partial x_{k+1}} -
P_{k+1} \, \frac{\,1}{k_B T} {f}^{k+1}_{k+1} \Big)(x_1,\ldots,x_{k},L) ,
\end{eqnarray*}
where $k =0,\ldots,N$ and we use the convention that $P_{-1} =
P_{N+1} = 0$.
Since only one ion can
escape or enter at any one time, $P_k$ is coupled only to the neighbouring
states $P_{k-1}$ and $P_{k+1}$. Note that $H_{N} = G_{N} = 0$, since
no ions can enter when the channel is fully occupied.
The first two terms (i.e. the first line)
on the right-hand side of (\ref{FP}) correspond to
ion diffusion and ion drift respectively, where the drift term
includes the external potential as well as ion-ion interactions. The
third term corresponds to a new ion entering the $k$-ion channel from
intracellular or extracellular solution; this term is negative since
such an event leads to a transition from a $k$-ion channel to a
$(k+1)$-ion channel. The fourth and fifth terms correspond to a new ion
entering a $(k-1)$-ion channel from the left and right respectively.
The sixth and seventh terms (i.e. the last line) of (\ref{FP})
correspond to ions leaving a $(k+1)$-ion
channel from the left and right respectively.
\noindent
The boundary conditions on (\ref{FP}) are
\begin{eqnarray}
P_k(-L,x_2,\ldots,x_{k}) & = & 0,\\
P_k(x_1,\ldots,x_{k-1},L) & = & 0,
\end{eqnarray}
along with the no-flux condition on the interface $x_i = x_{i+1}$,
\begin{equation}
\lim_{x_i \rightarrow x_{i+1}}
\left(\frac{\partial P_k}{\partial x_i} - P_k \frac{1}{k_B T} f_i^k
\right)
= \lim_{x_{i} \rightarrow x_{i+1}}\left( \frac{\partial P_k}{\partial
x_{i+1}} - P_k \frac{1}{k_B T} f_{i+1}^k
\right)
\end{equation}
\end{subequations}
for $i = 1,\ldots,k-1$, which ensures that the ions are correctly labelled.
The reason we have had to write this as a limit is that the
inter-ion potential tends to infinity as $x_i \rightarrow x_{i+1}$, while
$P_k$ tends to zero. A local analysis shows that we need $P_k$ to tend
to zero faster than $(x_{i+1}-x_i)^2$.
We note the following normalisation condition, which
holds at all times $t$,
\begin{equation}
\label{sumP}
\sum_{k=0}^N \int_{\Gamma_k} P_k({x}_1, \ldots, {x}_k,t)\, d {x}_1
\cdots d {x}_k = 1,
\end{equation}
where $\Gamma_k$ is the available state space when there are $k$ ions
in the channel, namely $\Gamma_k = \{(x_1, \ldots,x_k): x_1<x_2<
\cdots < x_k\}$.
We will usually be interested in the steady state; in that case
we solve the
coupled hierarchical Fokker-Planck
equations for the stationary probability distribution
$\widetilde{P}_k = \lim_{t \to \infty} P_k(t) $ for
$k=0, 1, \ldots, N$.
\subsection{An example with $N=2$} \label{sec:example}
We exemplify the theory above with a simple channel that is
selective to cations with elementary charge $e = 1.6 \times
10^{-19}\,\mbox{C}$.
The selectivity of this type of channel is generally caused by
negatively charged boundary proteins,
which decrease the energy barrier imposed by the narrow structure and
assist the permeation of cations.
For example, the oxygen atoms of four carbonyl groups in the
selectivity filter of the potassium channel can be modelled by putting
four negative partial charges equally
spaced on a ring of radius $d$ that is perpendicular to the $x$-axis
\cite{Corry:2000:TCT}.
We consider the simplest possible example of multi-ion channel
with capacity $N=2$ and a single binding site $M=1$.
The binding site is located at the position $x=\xi$ and is a potential
well generated by a ring of fixed partial negative charges a
distance $d$ from the channel axis.
By Coulomb's law, the potential energy $\Phi_1(x_1)$ seen by one
cation at $x_1$ with charge $e$ traversing through the channel is
\begin{subequations}
\label{pot_formula}
\begin{equation}
\label{pot1}
\Phi_1(x_1) = \frac{\,e}{\,k_B T }
\Big( \frac{- k_e Z }{\sqrt{(x_1-\xi)^2+d^2}} + U x_1 \Big),
\end{equation}
where $k_e$ is the Coulomb force constant, $Z$ is
the total fixed charge on the ring, and
$U$ is the constant field, which imposes a potential difference $2 U
L$ across the channel $[-L, L]$. This potential difference
is small compared to the potential well,
and does not change the shape of the potential well but merely tilts
it by a small angle. The force on the ion due to the potential is
\[
f_1^1 = - k_B T \,\frac{{\rm d} \Phi_1}{{\rm d} x_1}
.\]
When there are two cations in the channel, at positions $x_1$ and $x_2$,
the overall potential energy $\Phi_2(x_1,x_2)$, including the interaction
between the two free ions, is
\begin{equation}
\Phi_2(x_1,x_2) = \frac{\,e}{\,k_B T } \Big( \frac{- k_e Z
}{\sqrt{(x_1-\xi)^2+d^2}} + \frac{- k_e Z }{\sqrt{(x_2-\xi)^2+d^2}} +
\frac{k_e e}{|x_1-x_2|} + U (x_1 + x_2) \Big).
\end{equation}
\end{subequations}
The forces on the two ions are then
\[
f^2_1 = - k_B T\,\frac{\partial \Phi_2}{\partial x_1},
\qquad
f^2_2 = - k_B T\,\frac{\partial \Phi_2}{\partial x_2}.
\]
Finally we need to specify the entry rates $H_k$ and $G_k$ and entry
distribution functions $h$ and $g$.
We choose the simplest possible model for the entry distribution
function. We suppose that the ions entering from the left are all
placed at a position $x_-$ near the left-hand end of the channel,
while ions entering from the right are placed at a position $x_+$
near the right-hand end of the channel, that is
\[ h(x) = \delta(x -x_-) ,
\qquad
g(x) = \delta(x -x_+) .
\]
We have to be careful in
implementing this condition that we preserve the order of the ions in
the channel. We choose to do this as follows: if we are attempting to
place an ion at position $x_-$, and the position of the existing
ion $x_1<x_-$, then we abandon the insertion of the new ion.
A similar procedure is implemented at the right-hand end.
In effect this means that the rate of entry is chosen to be zero
whenever the position $x_1$ of the existing ion is such that $x_1<x_-$
or $x_1>x_+$. (An alternative procedure would be to modify the
distribution functions $h$ and $g$ so that $h=0$ if $x_1<x_-$ and
$g=0$ if $x_1>x_+$, but this would mean altering them from the present
$\delta$-functions.)
In general the entry rates may be functions of the current ion numbers
and locations as well as the intracellular $\mathcal{I}$ and
extracellular $\mathcal{E}$ environments. However, for this illustrative
example we suppose that they are constant subject to the constraint
set out above.
Thus we choose
\[ H_0 = \lambda, \quad G_0 = \mu, \quad
H_1 = \lambda \Theta(x_1-x_-), \quad
G_1 = \mu \Theta(x_+-x_1),\]
where $ \Theta$ is the Heaviside function.
Recall that $H_2 = G_2 = 0$ since the
channel is then fully occupied.
To run the Brownian simulation, we set the time step $\Delta t = 100
\,\mbox{ns}$, and the physical parameters as
\begin{gather}
L = 1 \mbox{nm}, \;\;\; x_{\pm} = \pm 0.9 \,\mbox{nm}, \;\;\; \xi = 0
\,\mbox{nm}, \;\;\; d = 0.5\,\mbox{nm},\;\;\;
D = 1 \mbox{nm}^2\cdot\mbox{ns}^{-1},\quad
\lambda = \mu = 5 \mbox{ns}^{-1}, \nonumber \\
\label{parameter}
T=298\, \mbox{K},\quad
k_B = 1.38 \times 10^{-23}\,
\mbox{J} \cdot\mbox{K}^{-1}, \quad
U = 0 \,\mbox{V}\cdot\mbox{nm}^{-1}, \quad
Z = e .
\end{gather}
We use the nanometer as the unit of length and the nanosecond as the
unit of time.
We evolve \eqref{Langevin} for $2 \times 10^{9}$ iterations until a
dynamic equilibrium is reached. During the simulation the number of
ions in the channel varies in time as ions enter and
leave. We record the number of ions and their locations at each time
step.
We find that the proportion of time spent with $k$ ions in the
channel, $J_k$ say, is given by
\begin{equation}
\label{BDJ}
J_0 \approx 0.0000, \quad J_1 \approx 0.8986,\quad J_2 \approx 0.1014.
\end{equation}
Thus, for these parameters, the channel is almost never empty,
and for nearly 90\% of the time there is just one ion in the
channel, with two ions the remaining 10\% of the time.
The histograms of $2$-ion distribution and $1$-ion distribution
are plotted in Fig. \ref{fig:SSProb2} and Fig. \ref{fig:SSProb1},
respectively.
The stationary probability distributions $\widetilde{P}_2(x_1, x_2)$,
$\widetilde{P}_1(x_1)$ and $\widetilde{P}_0$ satisfy the stationary
Fokker-Planck equations for a two-ion channel, namely
\begin{subequations}
\label{FP2}
\begin{eqnarray}
0 &=& D \,\nabla
\cdot \left( \nabla \widetilde{P}_2({x}_1, {x}_2)
+ \widetilde{P}_2({x}_1, {x}_2) \, \nabla \Phi_2({x}_1, {x}_2)
\right)
\nonumber \\ &&\mbox{ }
+ \lambda\Theta(x_2-x_-)\,\delta({x}_1-x_-)\widetilde{P}_{1}({x}_2)
+ \mu\Theta(x_+-x_1)\, \delta({x}_2-x_+)
\widetilde{P}_{1}({x}_1), \qquad
\label{SSFP1}\\
0 &=& D \,\fdd{}{x_1}\left( \fdd{\widetilde{P}_1}{x_1}({x}_1)
+ \widetilde{P}_1({x}_1) \fdd{\Phi_1}{x_1}({x}_1) \right)
- \left(\lambda\Theta(x_1-x_-)+ \mu\Theta(x_+-x_1)
\right) \widetilde{P}_1({x}_1) \nonumber \\
&&\mbox{}+ \lambda \,\delta({x}_1-x_-)\widetilde{P}_{0} + \mu\,
\delta({x}_1-x_+) \widetilde{P}_{0}\nonumber\\
&&
\mbox{}+ D \left(
\frac{\partial \widetilde{P}_{2}}{\partial x_1} +
\widetilde{P}_{2} \pd{\Phi_2}{x_1}\right)(-L,x_1) - D \left(
\frac{\partial \widetilde{P}_{2}}{\partial x_2} +
\widetilde{P}_{2} \pd{\Phi_2}{x_2}\right)(x_1,L)
, \label{SSFP2} \\
0 & = & -(\lambda+\mu)\widetilde{P}_0+ D \left(
\fdd{\widetilde{P}_{1}}{x_1} +
\widetilde{P}_{1} \fdd{\Phi_1}{x_1}\right)(-L) - D \left(
\fdd{\widetilde{P}_{1}}{x_1} +
\widetilde{P}_{1} \fdd{\Phi_1}{x_1}\right)(L),
\end{eqnarray}
with the boundary conditions
\begin{equation}
\widetilde{P}_2(-L,x_2)=\widetilde{P}_2(x_1,L)=0,
\qquad \widetilde{P}_1(-L)=\widetilde{P}_1(L)=0,
\end{equation}
and
\begin{equation}
\lim_{x_1 \rightarrow x_2} \left(\pd{\widetilde{P}_2}{x_1} + \widetilde{P}_2
\pd{\Phi_2}{x_1}\right)
= \lim_{x_1 \rightarrow x_2} \left(\pd{\widetilde{P}_2}{x_2} + \widetilde{P}_2
\pd{\Phi_2}{x_2} \right).\label{SSFP5}
\end{equation}
\end{subequations}
\begin{figure}[t]
\centering
\subfigure[ Histogram ]
{\label{fig:SSProb2}
\includegraphics[width=2.35in]{Figure2_3a}
}
\subfigure[ $\widetilde{P}_2(x_1,x_2)$ ]
{\label{fig:FProb2}
\includegraphics[width=2.5in]{Figure2_3b}
}
\caption{Using the parameters in \eqref{parameter}, the
stationary probability density of a $2$-ion channel is computed as
{\rm (a)}
histogram from Brownian dynamics simulation; {\rm (b)} solution of
\eqref{SSFP1}-\eqref{SSFP5}. Here $x_1$ is the position
of the first ion and $x_2$ is the position of the second ion; since we
label the ions such that $x_1<x_2$ the state space is a triangle.
}
\label{fig:Prob2}
\end{figure}
We solve \eqref{SSFP1}-\eqref{SSFP5} by the finite element PDE solver
{\em Comsol} with $28800$ elements. The stationary
distribution $\widetilde{P}_2(x_1,x_2)$ is shown in
Fig. \ref{fig:FProb2}, and $\widetilde{P}_1(x_1)$ is shown in
Fig. \ref{fig:FProb1}. We see that these agree with the histograms in
Fig. \ref{fig:SSProb2} and Fig. \ref{fig:SSProb1}
obtained from Brownian dynamics simulations.
We see that $\widetilde{P}_2(x_1,x_2)$ is localised around two discrete
states near $(x_-, \xi)$ and $(\xi, x_+)$, while
$\widetilde{P}_1(x_1)$ is localised around $x_1=\xi$. The most
likely path between the two states of $\widetilde{P}_2(x_1,x_2)$ can
also be faintly seen.
This localisation of $\widetilde{P}_2$ and $\widetilde{P}_1$ motivates
the definition of a small number of discrete states which the system
can adopt, which is the basis for the discrete transition rate
theory described
in the next section.
\begin{figure}[t]
\centering
\subfigure[ Histogram ]
{\label{fig:SSProb1}
\includegraphics[width=2.4in, height=2.0in]{Figure2_4a}
}
\subfigure[ $\widetilde{P}_1(x_1)$ ]
{\label{fig:FProb1}
\includegraphics[width=2.4in, height=2.0in]{Figure2_4b}
}
\caption{Using the parameters in \eqref{parameter}, the stationary
probability density of the one-ion state is computed by {\rm (a)}
histogram from
Brownian dynamics simulation; {\rm (b)} solution of
\eqref{SSFP1}-\eqref{SSFP5}. Here $x_1$ is the position
of the single ion in the channel.}
\label{fig:Prob1}
\end{figure}
\section{Discrete transition rate theory} \label{sec:rate}
We saw in our two-ion example (Figs. \ref{fig:Prob2} and \ref{fig:Prob1})
that $\widetilde{P}_2$ was mainly localised around
two regions in state space, while $\widetilde{P}_1$ was mainly
localised around one region.
Suppose, in general, that when there are $k$ ions in the channel the
stationary probability distribution $\widetilde{P}_k({x}_1,
\ldots, {x}_k)$
is mainly localised around $L_k$ small regions. Let us denote these
regions by
$$
{S}^{(i)}_k \subset \Gamma_k
\quad
\mbox{for} \;
i=1,\ldots, L_k;
$$
then $\widetilde{P}_k
$ is very small outside $\cup_{i=1}^{L_k} {S}^{(i)}_k$, so that
$$
\int_{\Gamma_k\backslash \cup_{i=1}^{L_k} {S}^{(i)}_k}
\widetilde{P}_k ({x}_1, \ldots, {x}_k) \, \mathrm{d}x_1 \ldots \mathrm{d}x_k \approx 0.
$$
The idea of
discrete rate theory is to replace the continuous variable
$\widetilde{P}_k$ with a set of discrete probabilities
corresponding to the states $S_k^{(i)}$, so that
\[
\widetilde{P}^{(i)}_k=\int_{{S}^{(i)}_k}
\widetilde{P}_k({x}_1, \ldots, {x}_k)\, \mathrm{d}x_1 \cdots \mathrm{d}x_k
\]
is the (stationary) probability that $(x_1,\ldots,x_k) \in S_k^{(i)}$.
Note that $\widetilde{P}^{(i)}_k$ is just a number: it is independent
of spatial variables. In total there are $L_{\Sigma} = \sum_{k=0}^N
L_k$ states in the channel, and
the sum of the probabilities of all $L_{\Sigma}$ states is unity
according to \eqref{sumP},
that is,
$$
\sum_{k=0}^N \sum_{i=1}^{L_k} \widetilde{P}^{(i)}_k = 1.
$$
We now imagine a Markov chain in which the channel undergoes
transitions from one of these discrete states to another, with the
transition probabilities dependent only on the current state (i.e.
no past history is involved). This Markov chain is illustrated in
Fig. \ref{fig:FP}.
Such Markov chains for ion channels have been previously considered
for a single ion in a many-well channel \cite{Abad:2009:NRT}. However,
multiple occupancy of the channel leads to a more complicated
transition structure.
Since only one ion can enter or leave at once (so that
$\widetilde{P}_k$ is coupled only to
$\widetilde{P}_{k-1}$ and
$\widetilde{P}_{k+1}$) we see that $S^{(i)}_k$ may have
transitions to and from only the states $S^{(\cdot)}_k$,
$S^{(\cdot)}_{k-1}$ and $S^{(\cdot)}_{k+1}$.
The general master equation for the time-dependent probability
$P^{(i)}_k(t)$ is of the form
\begin{eqnarray}
\fdd{}{t} P^{(i)}_k(t)&=& \underbrace{\sum_j \alpha^{(i,j)}_{k-1}
P^{(j)}_{k-1}(t) + \sum_l \beta^{(i,l)}_{k+1} P^{(l)}_{k+1}(t) +
\sum_m \gamma^{(i,m)}_k P^{(m)}_k(t)}_{\mbox{influx}} \nonumber \\
&& \mbox{ }-
\underbrace{\left(\sum_j \alpha^{(j,i)}_k + \sum_l \beta^{(l,i)}_k +
\sum_m \gamma^{(m,i)}_k \right) P^{(i)}_k(t) }_{\mbox{outflux}} \,,
\label{mastereq}
\end{eqnarray}
where $\alpha^{(i,j)}_{k}$ is the transition rate from
$S^{(j)}_{k}$ to $S^{(i)}_{k+1}$, $\beta^{(i,j)}_{k}$ is the
transition rate from
$S^{(j)}_{k}$ to $S^{(i)}_{k-1}$, and $\gamma^{(i,j)}_k$
is the transition rate from
$S^{(j)}_{k}$ to $S^{(i)}_{k}$. Thus $\alpha$ describes the
influx of a new ion, $\beta$ describes the loss of an ion to the
intracellular or extracellular environment, and $\gamma$ describes a
hopping of the ions within the channel.
In fact we expect many of these rates to be zero, since, for example,
when we add a new ion to a channel it must occupy either the
left-most or rightmost potential well.
The entry rates for new ions
$\alpha^{(i,j)}_k$ may be determined from $H_k$ and $G_k$, which
for the present purposes we are assuming are given.
The ion escape rates $\beta_k^{(i,j)}$
and hopping rates $\gamma_k^{(i,j)}$ can be computed from the
notions of mean
escape time and splitting probability, as described below.
To define the mean escape time we set all the influx probabilities to
zero. We then suppose that the channel initially contains $k$ ions
located at positions ${x}_1, \ldots, x_k$. We define the
mean escape time $\tau_k({x}_1, \ldots, {x}_k)$ to be the average
time before the channel undergoes a transition to a $(k-1)$-ion
configuration, that is, the average time for one ion to leave the
channel.
Using the backward-Kolmogorov equation \cite{Redner:2001:GFP}
it can be shown that $\tau_k$ satisfies
\begin{subequations}
\label{time}
\begin{gather}
\Delta \tau_k - \nabla \Phi_k \cdot \nabla \tau_k = -
\frac{1}{D}, \quad ({x}_1, \ldots,{x}_k) \in \Gamma_k \\
\tau_k = 0 \;\; \mbox{ if }\;\; x_1=-L \mbox{ or }x_k=L.
\end{gather}
\end{subequations}
Then the mean escape time from state ${S}^{(i)}_k$ is given by
\begin{equation}
\tau_k[{S}^{(i)}_k] = \frac{\int_{{S}^{(i)}_k}
\tau_k P_k\, \mathrm{d}x_1 \cdots \mathrm{d}x_k }{
\int_{{S}^{(i)}_k} P_k
\, \mathrm{d}x_1 \cdots \mathrm{d}x_k }. \label{taueqn}
\end{equation}
We now determine a similar expression for $\tau_k[{S}^{(i)}_k]$ using
the discrete transition rate model. Equating the two expressions will
then provide information on the rates $\beta_k^{(i,j)}$ and
$\gamma_k^{(i,j)}$.
To this end suppose that the channel is initially in the state
${S}^{(i)}_k$, so that $P^{(i)}_k(0) = 1$, $P^{(j)}_m(0) = 0$
otherwise. As before the influx rates $\alpha_k$ are set to be
zero.
The master equation \eqref{mastereq} for $P^{(\cdot)}_k$ then
decouples from those for $P^{(\cdot)}_{k-1}$ and $P^{(\cdot)}_{k+1}$,
and we can solve for $P^{(i)}_{k}$. Given this solution we can
determine the mean escape time $\tau_k[{S}^{(i)}_k]$ as
\begin{equation}
\label{timeP}
\tau_k[{S}^{(i)}_k] =
\frac{ \displaystyle
\sum_l\sum_j \int_0^{\infty} t\,\beta^{(l,j)}_k
P^{(j)}_k(t) \,\mathrm{d}t }{
\displaystyle \sum_l \sum_j \int_0^{\infty} \beta^{(l,j)}_k
P^{(j)}_k(t) \,\mathrm{d}t }.
\end{equation}
In calculating the mean escape time we have not distinguished between
the case where the first ion leaves from the
left end into the intracellular electrolyte $\mathcal{I}$ and the case where
the last ion leaves from the right end into the extracellular electrolyte
$\mathcal{E}$.
However, it is important that the discrete state model gets the ratio
of these probabilities correct, since this is what causes a net
ionic flux through the channel.
Thus the second piece of information we use to determine the rates
$\beta_k^{(i,j)}$ and $\gamma_k^{(i,j)}$ is the splitting probability
$\rho_k({x}_1, \ldots, {x}_k)$.
This is defined to be the probability that the first ion to exit
leaves from the left-hand side of the channel, conditional on an
ion-escaping event from a $k$-ion to a $(k-1)$-ion channel
having occurred, given that the $k$ ions started in positions $(x_1,
\ldots, x_k)$ initially.
The splitting probability function $\rho_k$ satisfies
\begin{subequations}
\label{cond}
\begin{gather}
\Delta \, \rho_k - \nabla \Phi_k \cdot \nabla \rho_k = 0 \qquad
\mbox{ for }
({x}_1, \ldots,{x}_k) \in \Gamma_k \label{condprob:a} \\
\rho_k = 1 \;\; \mbox{ on }\;\; x_1=-L, \qquad \rho_k = 0 \;\; \mbox{
on }\;\; x_{k}=L.
\end{gather}
\end{subequations}
As with $\tau_k$, we can now calculate the splitting probability for
state ${S}^{(i)}_k$ as
\begin{equation}
\rho_k[{S}^{(i)}_k] = \frac{\int_{{S}^{(i)}_k}
\rho_k P_k \, \mathrm{d}x_1 \cdots \mathrm{d}x_k }{
\int_{{S}^{(i)}_k} P_k
\, \mathrm{d}x_1 \cdots \mathrm{d}x_k }. \label{rhoeqn}
\end{equation}
To calculate the splitting probability from the Markov chain we need
to separate $\beta_k^{(l,j)}$ into two individual rates representing
the case that an ion leaves to the right into
the extracellular domain, and the case that an ion moves to the left
into the intracellular domain, that is, we write
\[ \beta_k^{(l,j)} = \beta_k^{+(l,j)}+ \beta_k^{-(l,j)}.
\]
Then the probability that an ion escapes to the left given that it
escapes is
\begin{equation}
\label{condP}
\rho_k[{S}^{(i)}_k] = \frac{ \displaystyle \sum_l \sum_j
\int_0^{\infty}\beta^{-(l,j)}_k P^{(j)}_k(t) \,\mathrm{d}t }%
{ \displaystyle \sum_l\sum_{j} \int_0^{\infty}
\beta^{-(l,j)}_k P^{(j)}_k(t) \,\mathrm{d}t + \sum_l\sum_{j} \int_0^{\infty}
\beta^{+(l,j)}_k P^{(j)}_k(t) \,\mathrm{d}t }.
\end{equation}
Note that, as in the case of the escape time $\tau_k$, the right-hand
side depends on ${S}^{(i)}_k$ through the initial condition on
$P_k^{(j)}$.
By equating (\ref{taueqn}) with (\ref{timeP}) and (\ref{rhoeqn}) with
(\ref{condP}) we have a number of equations to help determine the
unknown rates $\beta_k^{(l,j)}$ and $\gamma_k^{(l,j)}$.
Since only the left-most (respectively right-most) ion can escape from
the left-hand side of the channel (respectively right-hand side), many
of the rates $\beta_k^{(l,j)}$ will in fact be zero.
If we still do not have enough equations to determine the remaining
$\beta_k^{(l,j)}$ and $\gamma_k^{(l,j)}$, then it will be necessary
to determine some of
the transition rates between internal states. Since these do not
involve a change in the number of ions in the channel, they may be
determined by standard techniques.
Note that to determine the net flow of ions through the channel we
will also have to distinguish between ion entry from the left and from
the right, that is, we should also split
\[ \alpha_k^{(i,j)} = \alpha_k^{-(i,j)}+ \alpha_k^{+(i,j)}.\]
However, in most cases (at least) one of these rates will be zero,
since it is not possible to have the same transition between two states
occurring with an ion entering from either side. The one case where
this is possible is the transition between an empty channel and a
one-ion channel, which occurs in our example below.
\subsection{Example of two-ion channel}
Now we revisit the example of a two-ion channel in
Section~\ref{sec:example} and illustrate the rate theory using the
parameters in \eqref{parameter}.
We have seen that the channel can exist in a
$2$-ion, $1$-ion or $0$-ion state.
From Fig. \ref{fig:Prob1} we see that $\widetilde{P}_1(x_1)$ is
localised around the
single region $x_1=\xi$, so that there is only one metastable
state with one ion in the channel.
From Fig. \ref{fig:Prob2} we see that $\widetilde{P}_2(x_1,x_2)$ is localised
around the two states $(x_-, \xi)$ and $(\xi, x_+)$. Thus there are
two metastable states with two ions in the channel.
Thus our Markov chain comprises the four states
\begin{equation}
{S}^{(1)}_2: \{(x_-, \xi)\}, \quad {S}^{(2)}_2:
\{(\xi, x_+)\}, \quad {S}^{(1)}_1: \{\xi\},\quad
{S}^{(1)}_0: \{\}.
\label{fourstate}
\end{equation}
Thus $L_2 =
2$, $L_1 = 1$, $L_0 = 1$ and overall there are $L_{\Sigma}=4$ states for
this channel.
These states, and the transitions between them, are illustrated in
Fig.~\ref{fig:ill1}. The circle at the centre represents the binding
site $x = \xi$, and two other circles
represent the left and right entry positions $x = x_{\pm}$. A
(green) filled circle
represents a position occupied by an ion. Note that for the
transitions between $S_0^{(1)}$ and $S_1^{(1)}$ it is important to
distinguish between ions entering and leaving from the right and from
the left, so that we can calculate the net flow of ions through the
channel.
Let us first consider the ion entry rates.
We find
\[
\alpha_1^{+(1,1)} = 0,\quad
\alpha_1^{-(1,1)} = \lambda,\quad
\alpha_1^{+(2,1)} = \mu, \quad \alpha_1^{-(2,1)} = 0, \quad
\alpha_0^{+(1,1)} = \mu,\quad
\alpha_0^{-(1,1)} = \lambda.
\]
Note that the two zero values arise because the transition from
$S_1^{(1)}$ to $S_2^{(1)}$ occurs via
an ion entering from the left, while that from $S_1^{(1)}$ to
$S_2^{(2)}$ occurs via
an ion entering from the right. Note also that $\alpha_2^{(i,j)} = 0$ for
all $i,j$, since with two ions the channel is already full to capacity.
Let us now consider the ion leaving rates $\beta_k^{(i,j)}$. In principle we
have six of these to determine. However, since the transition from
$S_2^{(1)}$ to $S_1^{(1)}$ must occur via an ion leaving from the
left we know that $\beta_2^{+(1,1)}=0$. Similarly the transition from
$S_2^{(2)}$ to $S_1^{(1)}$ must occur via an ion leaving from the
right, so we know that $\beta_2^{-(1,2)}=0$.
This leaves $\beta_2^{-(1,1)}$, $\beta_2^{+(1,2)}$, $\beta_1^{-(1,1)}$
and $\beta_1^{+(1,1)}$ to determine. To these we must add the two
hopping rates $\gamma_2^{(1,2)}$ and $\gamma_2^{(2,1)}$.
\begin{figure}[t]
\centerline
{
\includegraphics[width=4.2in]{Figure3_1}
}
\caption{The transitions between the four different states: the three
circles represent the left entry point, the binding site and the right
entry point; a (green) filled circle indicates the presence of an ion.}
\label{fig:ill1}
\end{figure}
Denoting the state of the system by the probability vector
$\mathbf{P} = (P^{(1)}_2, P^{(2)}_2, P^{(1)}_1,P^{(1)}_0)^T$,
the master equation governing the evolution of the Markov chain is
then
\begin{subequations}
\begin{equation}
\label{transition}
\fdd{\mathbf{P}}{t} = \mathcal{T} \cdot \mathbf{P}(t),
\end{equation}
where the $4 \times 4$ transition matrix $ \mathcal{T}$ is given by
{\small
\begin{equation}
\label{Tmatrix}
\mathcal{T} = \left( \begin{array}{cccc} -\beta^{-(1,1)}_2 -
\gamma^{(2,1)}_2 &\gamma^{(1,2)}_2 &\alpha^{-(1,1)}_1 &0 \\[5mm]
\gamma^{(2,1)}_2 & -\beta^{+(1,2)}_2- \gamma^{(1,2)}_2
&\alpha^{+(2,1)}_1 &0 \\[5mm] \beta^{-(1,1)}_2 &\beta^{+(1,2)}_2
&\parbox{3cm}{$-\alpha^{-(1,1)}_1 - \alpha^{+(2,1)}_1$\\[-1mm]$\mbox{ }-
\beta^{+(1,1)}_1 - \beta^{-(1,1)}_1$} & \alpha^{+(1,1)}_0 + \alpha^{-(1,1)}_0 \\[5mm] 0 &0 &
\beta^{+(1,1)}_1+\beta^{-(1,1)}_1 & -\alpha^{+(1,1)}_0 - \alpha^{-(1,1)}_0
\end{array} \right) .
\end{equation}%
}%
\end{subequations}%
As expected, the sum of each column of the matrix $\mathcal{T}$ is zero
(since the system (\ref{transition}) conserves probability), so the
matrix is rank
deficient. The stationary probability $\widetilde{\mathbf{P}}$ is the
eigenvector associated with zero eigenvalue of matrix $\mathcal{T}$.
To calculate the mean escape time and splitting probability we set the
all entry rates to zero and solve (\ref{transition}).
To emphasize that this is an auxiliary problem and not the true
Markov chain we denote the
probability of lying in each state by $q^{(1)}_2(t),
q^{(2)}_2(t)$, $q^{(1)}_1(t), q^{(1)}_0(t)$ respectively. Then
(\ref{transition}) is
\begin{subequations}
\begin{eqnarray}
\fdd{{q}^{(1)}_2}{t}
& =& -\left( \gamma^{(2,1)}_2 + \beta^{-(1,1)}_2 \right)q^{(1)}_2
+ \gamma^{(1,2)}_2 q^{(2)}_2,\label{1}\\
\fdd{{q}^{(2)}_2}{t} &=& -\left( \beta^{+(1,2)}_2
+ \gamma^{(1,2)}_2 \right) q^{(2)}_2 + \gamma^{(2,1)}_2 q^{(1)}_2, \label{2}\\
\fdd{{q}^{(1)}_1}{t} &=& \beta^{-(1,1)}_2 q^{(1)}_2 + \beta^{+(1,2)}_2
q^{(2)}_2 -\left(\beta^{+(1,1)}_1 + \beta^{-(1,1)}_1
\right)q^{(1)}_1 , \label{3}
\\
\fdd{{q}^{(1)}_0}{t} &=& \left(\beta^{+(1,1)}_1 + \beta^{-(1,1)}_1
\right)q^{(1)}_1. \label{4}
\end{eqnarray}
\end{subequations}
The first two equations decouple. We start by considering the state
$S_2^{(1)}$, that is, we solve (\ref{1})--(\ref{2}) subject to the
initial conditions $q^{(1)}_2(0) = 1$ and $q^{(2)}_2(0)=0$. This gives
\begin{eqnarray}
\left( \begin{array}{c} q^{(1)}_2 \\ q^{(2)}_2 \end{array}
\right) &=& \frac{1}{\,\lambda_1 - \lambda_2} \left( \begin{array}{c}
\lambda_1 + \beta^{+(1,2)}_2 + \gamma^{(1,2)}_2 \\
\gamma^{(2,1)}_2 \end{array} \right)
\exp(\lambda_1\, t) \nonumber \\
&& \mbox{ }+ \frac{1}{\,\lambda_2 - \lambda_1}
\left( \begin{array}{c} \lambda_2 + \beta^{+(1,2)}_2 +
\gamma^{(1,2)}_2\\ \gamma^{(2,1)}_2 \end{array} \right)
\exp(\lambda_2\, t),
\end{eqnarray}
where $\lambda_1, \lambda_2$ are two eigenvalues satisfying
\begin{eqnarray*}
\lambda_1 + \lambda_2 &=&
-\left(
\beta^{-(1,1)}_2+\beta^{+(1,2)}_2+\gamma^{(2,1)}_2+\gamma^{(1,2)}_2
\right), \\
\lambda_1 \lambda_2 &=& \beta^{-(1,1)}_2 \beta^{+(1,2)}_2+
\beta^{-(1,1)}_2 \gamma^{(1,2)}_2 + \beta^{+(1,2)}_2
\gamma^{(2,1)}_2.
\end{eqnarray*}
Using (\ref{timeP}), the mean escape time $\tau_2[{S}^{(1)}_2]$ is
\begin{subequations}
\label{tau2_rho2_tau1_rho1_formula}
\begin{eqnarray}
\tau_{2} [{S}^{(1)}_2] &=&
\frac{\displaystyle
\int_0^{\infty} t \left(\beta^{-(1,1)}_2
q^{(1)}_2(t) + \beta^{+(1,2)}_2 q^{(2)}_2(t) \right) \,\mathrm{d}t}{\displaystyle
\int_0^{\infty}
\beta^{-(1,1)}_2 q^{(1)}_2(t) + \beta^{+(1,2)}_2 q^{(2)}_2(t) \,\mathrm{d}t}
\nonumber
\\
&=&
\frac{\beta^{+(1,2)}_2 + \gamma^{(2,1)}_2 + \gamma^{(1,2)}_2}{\beta^{+(1,2)}_2
\gamma^{(2,1)}_2 + \beta^{-(1,1)}_2 \gamma^{(1,2)}_2 + \beta^{-(1,1)}_2
\beta^{+(1,2)}_2}\,,
\end{eqnarray}
and, using (\ref{condP}), the splitting probability $\rho_2[{S}^{(1)}_2]$ is
\begin{eqnarray}
\rho_2[{S}^{(1)}_2] &=& \frac{\displaystyle\int_0^{\infty}
\beta^{-(1,1)}_2 q^{(1)}_2(t)
\,\mathrm{d}t}{\displaystyle\int_0^{\infty}
\beta^{-(1,1)}_2 q^{(1)}_2(t) + \beta^{+(1,2)}_2
q^{(2)}_2(t) \,\mathrm{d}t} \nonumber \\
&=& \frac{\beta^{-(1,1)}_2 \beta^{+(1,2)}_2 + \beta^{-(1,1)}_2
\gamma^{(1,2)}_2}
{\beta^{-(1,1)}_2 \beta^{+(1,2)}_2 + \beta^{-(1,1)}_2 \gamma^{(1,2)}_2
+ \beta^{+(1,2)}_2 \gamma^{(2,1)}_2}\,.
\end{eqnarray}
Similarly, by applying the initial conditions $q^{(1)}_2(0) = 0$ and
$q^{(2)}_2(0)=1$, we find
\begin{eqnarray}
\tau_2 [{S}^{(2)}_2] &=&
\frac{\beta^{-(1,1)}_2
+ \gamma^{(2,1)}_2 + \gamma^{(1,2)}_2}{\beta^{+(1,2)}_2 \gamma^{(2,1)}_2 +
\beta^{-(1,1)}_2 \gamma^{(1,2)}_2 + \beta^{-(1,1)}_2 \beta^{+(1,2)}_2}\,, \\
1-\rho_2[{S}^{(2)}_2]
&=& \frac{\beta^{-(1,1)}_2 \beta^{+(1,2)}_2 + \beta^{+(1,2)}_2
\gamma^{(2,1)}_2} {\beta^{-(1,1)}_2 \beta^{+(1,2)}_2 +
\beta^{-(1,1)}_2 \gamma^{(1,2)}_2 + \beta^{+(1,2)}_2 \gamma^{(2,1)}_2}.
\end{eqnarray}
Finally we have to consider the escape time and splitting probability
for state ${S}^{(1)}_1$. With the initial condition $q^{(1)}_2(0) =
q^{(2)}_2(0)=q^{(1)}_0(0)=0$, $q^{(1)}_1(0)=1$, equation (\ref{3})
decouples and is easily solved to give
\begin{eqnarray}
\tau_1 [{S}^{(1)}_1]& =&\frac{\displaystyle \int_0^{\infty} t q^{(1)}_1(t)
\,\mathrm{d}t}{\displaystyle \int_0^{\infty} q^{(1)}_1 \,\mathrm{d}t} =
\frac{1}{\beta^{-(1,1)}_1 +
\beta^{+(1,1)}_1},\\
\rho_1[{S}^{(1)}_1] &=&
\frac{\,\beta^{-(1,1)}_1}{\,\beta^{-(1,1)}_1 + \beta^{+(1,1)}_1}.
\end{eqnarray}%
\end{subequations}%
The mean escape times $\tau_2,$ $\tau_1$ and the splitting probability
$\rho_2,$ $\rho_1$
can be obtained by solving \eqref{time} and \eqref{cond}, respectively
with $k=2,$ $1$ by {\em Comsol}. Fixing $U=0$, $\xi = 0$,
the mean escape time $\tau_2$ in the triangular domain $\Gamma_2$ is
plotted in Fig.~\ref{fig:tau2}, and the splitting probability $\rho_2$ in
$\Gamma_2$ is plotted in Fig.~\ref{fig:P2L}. Since there is no applied
field across the channel ($U=0$), and the potential well is at the
center,
the external potential is symmetric about $x=0$; therefore both
functions are symmetric about the line $x_1 + x_2 = 0$. We take the values of
functions at the centre of the different states and obtain
\begin{gather*}
\tau_2 [{S}^{(1)}_2] = \tau_2 (x_-, \xi) = 0.01113, \quad
\rho_2[{S}^{(1)}_2] = \rho_{2} (x_-, \xi) = 0.9964,\\
\tau_2 [{S}^{(2)}_2] = \tau_2 (\xi, x_+)=0.01113, \quad
\rho_2[{S}^{(2)}_2] = \rho_2 (\xi, x_+) =0.0036,\\
\tau_1[{S}^{(1)}_1]=\tau_1(\xi)=3.9385 \times 10^3, \quad
\rho_1[{S}^{(1)}_1]=\rho_1(\xi)=0.5.
\end{gather*}
Equating these expressions to those of
\eqref{tau2_rho2_tau1_rho1_formula} we find six
equations for the six unknown rates. Solving these we find
$$
\beta^{-(1,1)}_2 = \beta^{+(1,2)}_2 = 89.8283, \; \gamma^{(2,1)}_2 =
\gamma^{(1,2)}_2 = 0.3361,\;
\beta^{-(1,1)}_1 = \beta^{+(1,1)}_1 = 1.2695\times 10^{-4}.
$$
Note that by using the mean escape time and the splitting probability
we have not had to estimate the internal hopping rates $\gamma_2$ from the
Fokker-Planck equation, but have been able to determine them from the
auxiliary problems we have solved. We will see in
Section~\ref{sec:geometry} that this is
especially useful when the internal states are not so well defined.
Since there is no external potential gradient in this case ($U=0$) the
rates are symmetric, and there is no net flux through the channel.
However, we can already make some observations.
Firstly, the rates $\beta_1^{\pm(1,1)}$ are tiny compared to the others. Thus the
channel will switch between single and double occupancy, but will
almost never be empty of ions. We will confirm this when we consider
the equilibrium occupancy of the channel in the next section.
Secondly, the exit rates $\beta_2^{-(1,1)}$
and $\beta^{+(1,2)}_2$ are about 270 times as large as the
hopping rates $\gamma^{(2,1)}_2$ and $\gamma^{(1,2)}_2$.
This means that, for these values of the
parameters, an incoming ion enters and leaves about 270 times before
it manages to replace the bound ion in the potential well at the
centre of the channel.
\begin{figure}[t]
\centering
\subfigure[$\tau_2(x_1, x_2)$]
{\label{fig:tau2}
\includegraphics[width=2.45in]{Figure3_2a}
}
\subfigure[$\rho_2(x_1,x_2)$]
{ \label{fig:P2L}
\includegraphics[width=2.45in]{Figure3_2b}
}
\caption{The parameters are given in \eqref{parameter}.
{\rm (a)} Mean escape time $\tau_2(x_1, x_2)$ for $2$-ion transiting
into $1$-ion state.
{\rm (b)} Splitting probability $\rho_2(x_1, x_2)$ of ion exiting
from left side under the condition that an ion escaping event occurs.}
\end{figure}
\subsection{Stationary probability of each state}
Now that we have determined all the transition rates in
Fig. \ref{fig:ill1}, the stationary probability for the number of ions
in the channel can be calculated explicitly as
\begin{equation}
\label{P2P1P0_formula}
\begin{aligned}
\widetilde{P}^{(1)}_2 + \widetilde{P}^{(2)}_2 &= \frac{\alpha^{-(1,1)}_1
\tau_2[S^{(1)}_2] + \alpha^{+(2,1)}_1
\tau_2[S^{(2)}_2]}{1+\frac{1}{\tau_1[S^{(1)}_1]}\frac{1}{\alpha^{-(1,1)}_0
+ \alpha^{+(1,1)}_0} + \alpha^{-(1,1)}_1 \tau_2[S^{(1)}_2] +
\alpha^{+(2,1)}_1 \tau_2[S^{(2)}_2]} \approx 0.1002, \\
\widetilde{P}^{(1)}_1&=
\frac{1}{1+\frac{1}{\tau_1[S^{(1)}_1]}\frac{1}{\alpha^{-(1,1)}_0
+ \alpha^{+(1,1)}_0} + \alpha^{-(1,1)}_1 \tau_2[S^{(1)}_2] +
\alpha^{+(2,1)}_1 \tau_2[S^{(2)}_2]} \approx 0.8998, \\
\widetilde{P}^{(1)}_0&=
\frac{\frac{1}{\tau_1(S^{(1)}_1)}\frac{1}{\alpha^{-(1,1)}_0 +
\alpha^{+(1,1)}_0}}{1+\frac{1}{\tau_1(S^{(1)}_1)}\frac{1}{\alpha^{-(1,1)}_0
+ \alpha^{+(1,1)}_0} + \alpha^{-(1,1)}_1 \tau_2(S^{(1)}_2) +
\alpha^{+(2,1)}_1 \tau_2(S^{(2)}_2)} \approx 2.285 \times
10^{-5}.
\end{aligned}
\end{equation}
We see that these agree well with the probabilities
obtained from a Brownian dynamics simulation in (\ref{BDJ}).
The same probabilities may be obtained by integrating the solutions of
Fokker-Planck equations over the configuration space, which gives
\begin{eqnarray}
I_2 &\equiv& \int_{-L}^L \int_{-L}^{x_2} \widetilde{P}_2(x_1, x_2)\,
\mathrm{d}x_1\,
\mathrm{d}x_2 \approx 0.1001, \nonumber \\
I_1 &\equiv& \int_{-L}^L
\widetilde{P}_1(x_1)\, \mathrm{d}x_1
\approx 0.8991,
\qquad
I_0 \equiv \widetilde{P}_0 \approx 8.3 \times
10^{-4}.
\label{FPprob}
\end{eqnarray}
\section{Current-voltage curve} \label{sec:IV}
The most important characteristic of an ion channel is its
conductance. In this section, we investigate the channel conductance by
examining the current-voltage curve for various values of the channel
parameters.
The potential difference across a channel is usually around $100$--$200$
mV. We therefore vary the potential gradient $U$ in the range $[-0.1,
0.1]$ V nm$^{-1}$, which gives a voltage drop in the range $[-200, 200]$ mV
for a channel of length $2$ nm.
Following the framework in Section~\ref{sec:rate}, we compute
the transition rates corresponding to each given value of $U$.
By examining the transition network in Fig. \ref{fig:ill1} we see
that there are two different paths which lead to ions moving from the
intracellular (left-hand side) to the extracellular (right-hand side)
domain, namely
\begin{equation*}
\mbox{PATH 1}:\;\; S^{(1)}_1 \xrightarrow{\alpha^{-(1,1)}_1} S^{(1)}_2
\xrightarrow{\gamma^{(2,1)}_2} S^{(2)}_2 \xrightarrow{\beta^{+(1,2)}_2}
S^{(1)}_1, \qquad
\mbox{PATH 2}:\;\; S^{(1)}_1 \xrightarrow{\beta^{-(1,1)}_1} S^{(1)}_0
\xrightarrow{\alpha^{-(1,1)}_0} S^{(1)}_1.
\end{equation*}
Both paths start with a channel with one ion bound at the potential
well. In Path 1 another ion first enters the channel from
the left-hand source to produce a two-ion channel. The two ions then hop to
the right so the new ion lies in the potential well at the centre of the
channel. The ion released from this well then exits the channel at the
right. Thus in Path 1 we can think of a new ion coming in and
knocking the present ion out the other side.
In Path 2 the ion in the channel first leaves from the right to leave
an empty channel, and then a new ion enters from the left.
By considering the transition rates we can determine the relative
importance of each of these mechanisms. For the parameters in
(\ref{parameter}) the rate $\beta^{-(1,1)}_1$ is tiny and Path 1
dominates the current. Note that at equilibrium the ion flux entering
from the left is balanced by the flux which leaves from the right for
each path, so that
$$
\alpha^{-(1,1)}_1 \widetilde{P}^{(1)}_1 = \beta^{+(1,2)}_2
\widetilde{P}^{(2)}_2, \qquad
\alpha^{-(1,1)}_0 \widetilde{P}^{(1)}_0 =
\beta^{-(1,1)}_1 \widetilde{P}^{(1)}_1.
$$
Similarly ions flow from right to left via the paths,
\begin{equation*}
\mbox{PATH 3}:\;\; S^{(1)}_1 \xrightarrow{\alpha^{+(2,1)}_1} S^{(2)}_2 \xrightarrow{\gamma^{(1,2)}_2} S^{(1)}_2 \xrightarrow{\beta^{-(1,1)}_2} S^{(1)}_1, \qquad
\mbox{PATH 4}:\;\;S^{(1)}_1 \xrightarrow{\beta^{+(1,1)}_1} S^{(1)}_0\xrightarrow{\alpha^{+(1,1)}_0} S^{(1)}_1,
\end{equation*}
and in equilibrium
$$
\alpha^{+(2,1)}_1 \widetilde{P}^{(1)}_1 = \beta^{-(1,1)}_2
\widetilde{P}^{(1)}_2, \qquad
\alpha^{+(1,1)}_0 \widetilde{P}^{(1)}_0 =
\beta^{+(1,1)}_1 \widetilde{P}^{(1)}_1.
$$
Combining the current from each path the net current is given by
\begin{eqnarray}
\label{current_1well}
I &=& e \Big( \alpha^{-(1,1)}_0 \widetilde{P}^{(1)}_0 +
\alpha^{-(1,1)}_1 \widetilde{P}^{(1)}_1 - \beta^{+(1,1)}_1
\widetilde{P}^{(1)}_1 - \beta^{-(1,1)}_2 \widetilde{P}^{(1)}_2 \Big)
\nonumber \\
&=& e \Big( \beta^{-(1,1)}_1 \widetilde{P}^{(1)}_1 + \beta^{+(1,2)}_2
\widetilde{P}^{(2)}_2 - \alpha^{+(1,1)}_0 \widetilde{P}^{(1)}_0 -
\alpha^{+(2,1)}_1 \widetilde{P}^{(1)}_1\Big) .
\end{eqnarray}
\begin{figure}[t]
\centering
\subfigure[]
{\label{fig:IVplot:lam1}
\includegraphics[width=2.45in]{Figure4_1a}}
\subfigure[]
{\label{fig:IVplot:lam2}
\includegraphics[width=2.45in]{Figure4_1b.png}}
\caption{
I-V curve for $L^2\lambda/D = L^2\mu/D=1$, $5$, $10$ with
$d/L=0.5$; all other
parameters are as in \eqref{parameter}. {\rm (a)} for small
voltages, for
which internal transitions in the channel are rate limiting;
{\rm (b)} for larger voltages, at which saturation
occurs due to the finite rate of entry of ions from the bulk.}
\label{fig:IVplot:lam}
\end{figure}%
\begin{figure}[t]
\centering
{
\includegraphics[width=2.45in]{Figure4_2}}
\caption{I-V curve for $d/L=0.3$, $0.4$, $0.5$ with $L^2\lambda/D
=L^2\mu/D=5$. All other parameters are as in \eqref{parameter}.}
\label{fig:IVplot:d}
\end{figure}%
Next we study how the conductance of the channel varies with the
external potential energy parameter $d/L$
and the dimensionless entry rates $L^2\lambda/D$ and
$\mu L^2/D$.
We plot in Fig.~\ref{fig:IVplot:lam} the current-voltage (I-V) curve
for different entry rates
when $d/L=0.5$. For large voltages the current saturates since it is
limited by the entry rate of ions $\lambda$ and $\mu$; this effect is
illustrated in Fig.~\ref{fig:IVplot:lam2}.
In
Fig.~\ref{fig:IVplot:d} we plot the I-V curve for various values of $d$
with a fixed entry rate.
The slopes of these curves give the conductance of the channel, which
grows initially with increasing voltage before diminishing as
saturation sets in. We see that
as the dimensionless entry rate increases,
or the potential well gets shallower,
the conductance of the channel increases.
\section{Optimal geometry of potential} \label{sec:geometry}
\begin{figure}[t]
\centering
\subfigure[$\Phi$ vs. $x$]
{ \label{fig:pot_group_1well:a}
\includegraphics[width=2.45in]{Figure5_1a}}
\subfigure[$Z$ vs. $d$]
{\label{fig:pot_group_1well:b}
\includegraphics[width=2.45in]{Figure5_1b}}
\caption{{\rm (a)} A group of potential energy wells for $d = 0.1, 0.2, 0.3,
\ldots,0.9, 1.0$ nm are plotted by solid curves respectively from
narrow to wide well; the darker curve corresponds to $Z = e$,
$d/L=0.5$, and the dotted curve corresponds to the energy drop due to the
applied field $U=-0.05$ V nm$^{-1}$.
{\rm (b)} The external charge $Z$ as a function of $d$. }
\label{fig:pot_gropu_1well}
\end{figure}
We showed that a channel with shallower potential well has larger
conductivity in Fig. \ref{fig:IVplot:d}, if all other physical
parameters are fixed.
However, the depth of the potential well is not the only factor that
determines the ion flux. Abad et al \cite{Abad:2009:NRT} studied
the flux through a channel of capacity $N=1$
with symmetric M-shape potential energy, and showed that there exists
a critical ratio $\sigma$ of the width of the potential well to the
length of the channel, at which
the flux is maximized. This optimal geometry of the potential energy
requires that the potential well is neither too narrow ($\sigma \to 0$) nor
too wide ($\sigma \to 1$).
We now perform a similar analysis of the multi-ion channel.
For our $N=2$ example we study how the shape of an external potential
on the protein boundary affects the conductivity of the
channel. We vary the distance $d$ and carefully choose the external
charge as
\[
Z = e\, \frac{\displaystyle 2-\left(1+0.5^2\right)^{-0.5}}{\displaystyle
d^{-1}L -\left(1+d^2L^{-2}\right)^{-0.5}},
\]
so that the depths
of the resulting potential wells are all the same (and so that $Z=e$,
when $d/L=0.5$); the variation of the potential well with $d$ is shown in
Fig. \ref{fig:pot_group_1well:b}.
The resulting group of potential wells with applied field $U=-0.05$ V
nm$^{-1}$ are plotted in Fig. \ref{fig:pot_group_1well:a}, the darker
curve corresponds to $Z=e$, $d=0.5 L$,
the dotted curve shows how the potential wells tilt with the applied field.
Obviously, when $d$ is small, we have a steep potential drop
near the binding site, and a relatively flat energy landscape near the ends
of the channel. When $d/L$ gets large, the potential well tends to a
curve
that is steeper near the end of the channel and flatter near the binding site.
\begin{figure}[htbp]
\centering
\subfigure[$d=0.1$ nm]
{\label{fig:P2:a}
\includegraphics[width=1.65in]{Figure5_2a}}
\subfigure[$d=0.2$ nm]
{\label{fig:P2:b}
\includegraphics[width=1.65in]{Figure5_2b}}
\subfigure[$d=0.3$ nm]
{\label{fig:P2:c}
\includegraphics[width=1.65in]{Figure5_2c}}
\subfigure[$d=0.4$ nm]
{\label{fig:P2:d}
\includegraphics[width=1.65in]{Figure5_2d}}
\subfigure[$d=0.5$ nm]
{\label{fig:P2:e}
\includegraphics[width=1.65in]{Figure5_2e}}
\subfigure[$d=0.6$ nm]
{\label{fig:P2:f}
\includegraphics[width=1.65in]{Figure5_2f}}
\subfigure[$d=0.7$ nm]
{\label{fig:P2:g}
\includegraphics[width=1.65in]{Figure5_2g}}
\subfigure[$d=0.8$ nm]
{\label{fig:P2:h}
\includegraphics[width=1.65in]{Figure5_2h}}
\subfigure[$d=0.9$ nm]
{\label{fig:P2:i}
\includegraphics[width=1.65in]{Figure5_2i}}
\caption{Fixing $\lambda=\mu=5$ ns$^{-1}$, $U=-0.05$ V nm$^{-1}$, we
plot the stationary probability distribution $\widetilde{P}_2(x,y)$
for $d =0.1, 0.2, 0.3$,
$0.4, 0.5, 0.6$, $0.7, 0.8, 0.9$ nm, which are obtained by solving
\eqref{FP2} numerically.
It shows the change from two distinct states at $(-0.9, 0)$ and $(0, 0.9)$,
through a ridge of high probability, to one distinct state near
$(-0.45, 0.45)$, as the geometry of the potential well changes.}
\label{fig:P2}
\end{figure}
Now we fix $U=-0.05$ V nm$^{-1}$, $\lambda= \mu=5$ ns$^{-1}$, and plot
in Fig. \ref{fig:P2} the stationary probability density function
$\widetilde{P}_2(x_1, x_2)$ obtained by solving
stationary Fokker-Planck equations numerically
for various values of $d$. Recall that since $x_1
< x_2$, we only look at the upper left triangular domain.
We observe that when the potential well is very narrow and steep at
the binding site with $d=0.1$ nm, the probability distribution of two
ions
is localized at two tiny spots around $(x_-, \xi)$ and $(\xi, x_+)$.
As $d$ increases until $d=0.5$ nm, the two spots grow a little larger and
shift slightly away from
$(x_-, \xi)$ and $(\xi, x_+)$. Thus for $d \in [0.1, 0.5]$ nm we can
justify the use of our four-state rate theory.
For $d$ between $0.5$ nm and $0.6$ nm, a ridge of high probability
distribution
emerges that connects the previous two spots at its two tails.
This implies that instead of the two ions being trapped at one of the two
states, they can wander back and forth freely between these two
states.
As $d$ increases even further to $d=0.9$ nm, the ridge shrinks to its
center peak, which corresponds to the two ions both sitting in the
potential well. Thus, for larger values of $d$, we have effectively
only one state for the two-ion occupied channel.
\begin{figure}[t]
\centering
\subfigure[]
{ \label{fig:tau2_d}
\includegraphics[width=2.45in]{Figure5_3a}}
\subfigure[]
{ \label{fig:rho2_d}
\includegraphics[width=2.45in]{Figure5_3b}}
\subfigure[]
{ \label{fig:beta_1well}
\includegraphics[width=2.45in]{Figure5_3c}}
\subfigure[]
{ \label{fig:gamma_1well}
\includegraphics[width=2.45in]{Figure5_3d}}
\caption{Fixing $U=-0.05$ V nm$^{-1}$, we numerically solve
\eqref{time} and \eqref{condP} and obtain
the mean escape times and left splitting probabilities at two
states $S_2^{(1)}=(-0.9,0)$ and $S_2^{(2)}=(0,0.9)$ for
$d$ between 0.1 nm and 1 nm, which
determine the escaping rates and transition rates by
\eqref{tau2_rho2_tau1_rho1_formula}. We compare it with the results
of the three-state simplified model $(\ref{threestate})$.
{\rm (a)} The mean escape time $\tau_2$ vs. $d$ at different states.
{\rm (b)} The splitting probability $\rho_2$ vs. $d$ at different states.
{\rm (c)} The escaping rates $\beta^{(1)}=\beta_2^{-(1,1)}$
and $\beta^{(2)}=\beta_2^{+(1,2)}$.
{\rm (d)} The transition rates $\gamma^{(1)}=\gamma_2^{(2,1)}$
and $\gamma^{(2)}=\gamma_2^{(1,2)}$.
Note that those rates only depend on the mean escape time and
left splitting probability, and are independent of entry rates.
}
\label{fig:S3S4_cmp}
\end{figure}
Thus, for large $d$, we can define a single-chained three-state system
\begin{equation}
{S}^{(1)}_2: \{((x_-+\xi)/2, (\xi+x_+)/2)\}, \quad
{S}^{(1)}_1: \{\xi \}, \quad
{S}^{(1)}_0: \{\},
\label{threestate}
\end{equation}
which is similar to the four-state system in Fig. \ref{fig:ill1},
except that ${S}^{(1)}_2 = {S}^{(2)}_2$ are combined
and there is no hopping
$\gamma^{(1,2)}_2$ and $\gamma^{(2,1)}_2$ between them.
Following the framework in Section~\ref{sec:rate}, all
the entry rates $\alpha^{-(1,1)}_0 = \alpha^{-(1,1)}_1 = \mu$
and $\alpha^{+(1,1)}_0 = \alpha^{+(1,1)}_1 = \lambda$ are
prescribed, and the escaping rates are easily calculated as
\[ \beta^{-(1,1)}_k = \frac{\,\rho_k(S^{(1)}_k)}{\,\tau_k(S^{(1)}_k)},
\quad \beta^{+(1,1)}_k = \frac{1-\rho_k(S^{(1)}_k)}{\,\tau_k(S^{(1)}_k)} ,
\quad k=1, 2.
\]
In Fig. \ref{fig:tau2_d}, we plot the mean escape time $\tau_2$ from
the left state $S_2^{(1)}=(-0.9,0)$ (black solid curve) and the right
state $S_2^{(2)}=(0,0.9)$ (black dash-dotted curve)
in four-state formulation, and compare them with $\tau_2$ from the
balanced state $S_2^{(1)}=(-0.45, 0.45)$ (red solid curve) in the
three-state formulation (\ref{threestate}).
Since the potential well is broader as $d$ increases, the second
ion (the one which is not trapped in the well) feels its effect more,
resulting in an exponential increase in the mean escape time. In a
channel with descending
voltage from left to right ($U=-0.05$ V nm$^{-1}$), it takes a longer
time for two ions in the left state $(-0.9, 0)$ to escape than two
ions in the
right state $(0, 0.9)$, so the black solid
curve is above the black dash-dotted curve.
For two ions in the balanced state $S_2^{(1)}=(-0.45, 0.45)$ in the
middle of channel, it takes an even longer time, so the red solid
curve is above the two black curves.
Notice that the ratio $\tau_2(-0.9,0)/\tau_2(0, 0.9)$ is
largest when $0.5 < d < 0.6$. Note also that for small $d$ the
three-state formulation is invalid (so $\tau_2(-0.45, 0.45)$ is
meaningless) but that for large $d$ the ratio $\tau_2(-0.45,
0.45)/\tau_2(-0.9,0)$ is close to $1$: for a broad potential the
mean escape time is insensitive to the precise initial
position of the two ions.
In Fig. \ref{fig:rho2_d} we plot the left splitting probability
$\rho_2$ at the left state $S_2^{(1)}=(-0.9,0)$ (black solid curve)
and the right splitting probability $1-\rho_2$
at the right state $S_2^{(2)}=(0,0.9)$ (black dash-dotted curve) in
four-state formulation. For large $d$, the external potential has a
large gradient near both
ends of the channel (as shown in Fig. \ref{fig:pot_group_1well:a}),
which pulls the new ion introduced at either source towards the center
of channel, and into balance with the existing ion
at around $(-0.45, 0.45)$. So new entering ions at each source are
less likely to leave from the same end of the channel, and both the left
splitting probability at
left state (black solid curve) and the right splitting probability at
right state (black dash-dotted curve) decrease monotonically as $d$
increases. In addition, the left-splitting
probability $\rho_2$ at the balanced state $S_2^{(1)}=(-0.45, 0.45)$
in the three-state formulation is plotted by the red solid curve,
which overlaps with $\rho_2(-0.9, 0)$. As with the escape time, the
splitting probability is insensitive to the initial position of the
ions, and is dominated by the effects of the potential.
Next we compare the escape rates $\beta$ in the two formulations in
Fig. \ref{fig:beta_1well}. The black solid curve shows
the rate at which an ion at left state $S_2^{(1)}=(-0.9,0)$ escapes
from the left side,
the black dash-dotted curve plots the rate at which an ion at right
state $S_2^{(2)}=(0,0.9)$ escapes from right side. The left and right
escaping rates at the balanced state $S_2^{(1)}=(-0.45, 0.45)$ in
three-state formulation (\ref{threestate}) are plotted by red solid
curve and red dash-dotted curve, respectively.
All escaping rates drop rapidly as $d$ increases. When $d \ge 0.7$,
the potential well is so broad that the two ions are trapped
in the channel for a long time. In Fig. \ref{fig:gamma_1well}, we plot
the transition rates between states $S_2^{(1)}=(-0.9,0)$ and
$S_2^{(2)}=(0,0.9)$. Due to the inclined voltage, the transition from
left state $S_2^{(1)}$ to $S_2^{(2)}$ occurs more often than the other way,
namely the transition rate $\gamma_2^{(2,1)}$ depicted by solid curve
is above $\gamma_2^{(1,2)}$ by the dash-dotted curve.
When $d<0.4$, the transition rates are very low ($<10^{-4}$
ns$^{-1}$), so the two states are very distinct. For large $d$, the
observed single state in Fig. \ref{fig:P2} can be
treated as average of the two distinct states.
We remark that the mean escape time of the ion from a one-ion channel
$\tau_1(S_1^{(1)}=\{0\})$ is $O(10^5)$ times larger than
$\tau_2(S_2^{(1)})$, so the escape rates $\beta_1^{-(1,1)} \ll
\beta_2^{-(1,1)}$ and
$\beta_1^{+(1,1)} \ll \beta_2^{+(1,2)}$.
By \eqref{P2P1P0_formula} we see that for there to be an appreciable
probability of having no ion in the channel we need
$\tau_1(S_1^{(1)})^{-1} \sim \alpha^{-(1,1)}_0 +
\alpha^{+(1,1)}_0 = \lambda + \mu$.
In our example, we choose the smallest entry rates to be $\lambda =
\mu = 1$, so the resulting $\widetilde{P}_0^{(1)}$ is extremely
small compared to $\widetilde{P}_1^{(1)}$ and
$\widetilde{P}_2^{(1)}$,
and is therefore negligible.
Thus, for any entry rates $\lambda=O(1)$, $\mu=O(1)$, only one-ion
occupancy
(small entry rates) or two-ion occupancy (large entry rates) is
observed most of time (i.e. $\widetilde{P}_1^{(1)} + \widetilde{P}_2^{(1)}
+ \widetilde{P}_2^{(2)} \approx 1$).
In Fig. \ref{fig:P2P1_d}, fixing $\lambda= \mu=5$ ns$^{-1}$, $U=-0.05$
V nm$^{-1}$, we compare the stationary probabilities of two-ion occupancy
(solid curves) and one-ion occupancy (dash-dotted curve) for various values of
$d$ using (i) the four-state formulation (\ref{fourstate}) (black curves),
(ii) the three-state formulation (\ref{threestate}) (red curves), and
(iii) by solving the Fokker-Planck equations using {\em Comsol}
(discrete markers). When $d$ is large, the stationary probabilities
obtained from all three methods agree with each other, which confirms
that the single balanced state in the three-state formulation can be
treated as an average of the two distinct
states (with frequent transitions) in the four-state
formulation. Because the mean escape time grows from $O(10^{-2})$ ns
to $O(10^2)$ ns with $d$ increasing as shown in Fig. \ref{fig:tau2_d},
a constant entry rate $\lambda=\mu=5$ ns$^{-1}$ leads to a transition
from initially one-ion dominant to two-ion
dominant channel.
After obtaining the rates and probabilities, we can compare the flux
through the channel from the rate theory and the solution of the
Fokker-Planck equations.
We integrate the right hand side of \eqref{SSFP2} with respect to
$x_1$ over $[-L, L]$,
which yields
\begin{equation}
\label{integral}
f_{1R} - f_{1L} + (\mu+\lambda) I_0 - (\mu \int_{-L}^{x_+} \widetilde{P}_1 \, d x_1 +\lambda \int_{x_-}^L \widetilde{P}_1 \, d x_1) - (f_{2R}-f_{2L}) = 0,
\end{equation}
where
\[ \begin{aligned}
f_{2L} &= \int_{-L}^L D \left(
\frac{\partial \widetilde{P}_{2}}{\partial x_1} +
\widetilde{P}_{2} \frac{\partial \Phi_2}{\partial x_1}\right)(-L,x_1) \, d x_1
= \int_{-L}^L D \frac{\partial \widetilde{P}_{2}}{\partial x_1}(-L,x_1) \, d x_1 ,\\
f_{2R} &= \int_{-L}^L D \left(
\frac{\partial \widetilde{P}_{2}}{\partial x_2} +
\widetilde{P}_{2} \frac{\partial \Phi_2}{\partial x_2}\right)(x_1,L) \, d x_1
= \int_{-L}^L D \frac{\partial \widetilde{P}_{2}}{\partial x_2}(x_1,L) \, d x_1, \\
f_{1L} &= D \left( \frac{d \widetilde{P}_1}{d x_1} + \widetilde{P}_1 \frac{d \Phi_1}{d x_1} \right)(-L)
= D \, \frac{d \widetilde{P}_1}{d x_1}(-L), \\
f_{1R} &= D \left( \frac{d \widetilde{P}_1}{d x_1} + \widetilde{P}_1 \frac{d \Phi_1}{d x_1} \right)(L)
= D \, \frac{d \widetilde{P}_1}{d x_1}(L).
\end{aligned}
\]
In Table \ref{tabletransitions}, we show that the eight transitions
connected to state $S^{(1)}_1$ in Fig. \ref{fig:ill1} correspond
one by one to the eight terms in \eqref{integral}.
\begin{table}[t]
\begin{center}
\begin{tabular}{|c|c|c|}
\hline
Transitions by escaping & Flux from rate theory & Flux from Fokker-Planck \\
\hline
$S^{(1)}_2 \xrightarrow{\beta^{-(1,1)}_2} S^{(1)}_1$ &
$\beta^{-(1,1)}_2 \widetilde{P}^{(1)}_2$ &
$f_{2L}$ \\
$S^{(2)}_2 \xrightarrow{\beta^{+(1,2)}_2} S^{(1)}_1$ &
$\beta^{+(1,2)}_2 \widetilde{P}^{(2)}_2$ &
$-f_{2R}$ \\
$S^{(1)}_1 \xrightarrow{\beta^{+(1,1)}_1} S^{(1)}_0$ &
$\beta^{+(1,1)}_1 \widetilde{P}^{(1)}_1$ &
$f_{1L}$ \\
$S^{(1)}_1 \xrightarrow{\beta^{-(1,1)}_1} S^{(1)}_0$ &
$\beta^{-(1,1)}_1 \widetilde{P}^{(1)}_1$ &
$- f_{1R}$ \\
$S^{(1)}_1 \xrightarrow{\alpha^{-(1,1)}_1}S^{(1)}_2$ &
$\mu \widetilde{P}^{(1)}_1$ &
$\mu I_1$ \\
$S^{(1)}_1 \xrightarrow{\alpha^{+(2,1)}_1} S^{(2)}_2$ &
$ \lambda \widetilde{P}^{(1)}_1$ &
$\lambda I_1$ \\
$S^{(1)}_0 \xrightarrow{\alpha^{-(1,1)}_0} S^{(1)}_1$ &
$\mu \widetilde{P}^{(1)}_0$ &
$\mu I_0$ \\
$S^{(1)}_0 \xrightarrow{\alpha^{+(1,1)}_0} S^{(1)}_1$ &
$ \lambda \widetilde{P}^{(1)}_0$ &
$\lambda I_0$ \\
\hline
\end{tabular}
\end{center}
\caption{Transitions computed by the four-state model (\ref{fourstate}) and
the hierarchical Fokker-Planck equation.}
\label{tabletransitions}
\end{table}
The left to right flux across the left boundary of the channel is generated by
introducing new ions $\mu I_0 + \mu \int_{-L}^{x_+} \widetilde{P}_1 \, d x_1 $, and the right to
left flux across the
left boundary of the channel is generated by ions leaving the left boundary $f_{1L} + f_{2L}$.
Similarly we have the left to right flux across the right boundary of the channel is generated by
ions leaving the right boundary $ - f_{1R} - f_{2R}$, and the right to left flux across the
right boundary of the channel is generated by introducing new ions
$\lambda I_0 + \lambda \int_{x_-}^L \widetilde{P}_1 \, d x_1$. Thus
the overall fluxes are
\begin{equation}
\label{flux}
f_L = \mu I_0 + \mu \int_{-L}^{x_+} \widetilde{P}_1 \, d x_1 - f_{1L} - f_{2L} , \quad
f_R = - f_{1R} - f_{2R} - \lambda I_0 - \lambda \int_{x_-}^L \widetilde{P}_1 \, d x_1.
\end{equation}
\begin{figure}[t]
\centering
\subfigure[]
{\label{fig:P2P1_d}
\includegraphics[width=2.45in]{Figure5_4a}}
\subfigure[]
{\label{fig:flux_1well}
\includegraphics[width=2.45in]{Figure5_4b}}
\caption{Fixing $\lambda=\mu=5$ ns$^{-1}$, $U=-0.05$ V nm$^{-1}$.
{\rm (a)} We plot the stationary probabilities of $2$-ion and $1$-ion by four-state formulation
$(\ref{fourstate})$
(black curves), three-state formulation $(\ref{threestate})$ (red curves) and
by solving Fokker-Planck equation with $28800$ elements (discrete marker).
{\rm (b)} We plot the current $I$ vs. $d$ by four-state formulation
$(\ref{fourstate})$
(black solid curve) and by three-state formulation $(\ref{threestate})$ (red solid curve), which
are compared with $(f_L+f_R)$ in \eqref{flux} from solution of
the Fokker-Planck equation (discrete marker).
}
\end{figure}
In Fig. \ref{fig:flux_1well}, we fix $ \lambda=\, \mu=5$ ns$^{-1}$,
$U=-0.05$ V nm$^{-1}$, and
plot the current $I$ vs. $d$ from the flux $(f_L+f_R)$ in \eqref{flux}
by discrete open diamonds,
obtained by solving \eqref{FP2}. In comparison, the black solid curve
depicts the current obtained by applying the four-state rate theory
(\ref{fourstate}), and the red solid
curve depicts the current by the three-state rate theory (\ref{threestate}).
We see that for a broad potential well with $0.6 < d < 1$,
all three methods reach a good agreement:
the four-state works for large $d$, even though
the stationary probability distribution in Fig. \ref{fig:P2P1_d} shows
there should be three states for large $d$. This is because of our use
of escape times and splitting probabilities to determine the
transition rates in the model, rather than by trying to estimate
hopping rates directly.
However,
with small $d$, the three-state model is not an accurate description of the
Markov process, and the flux quickly becomes inaccurate.
An important observation from Fig. \ref{fig:flux_1well} is that a maximal
current is achieved around $d=0.6$ nm, which means there exists an
optimal
shape of the potential well to conduct ions, even if the depth of the well
remains the same. This result agrees with the argument
in \cite{Abad:2009:NRT} for a single
ion channel with piecewise linear potential energy.
We may explain the existence of an optimal flux by looking at the
stationary probabilities $\widetilde{P}_2$ and $\widetilde{P}_1$ as
a function of $d$ in Fig. \ref{fig:P2P1_d}.
When $d$ is small, the potential well is very narrow and steep near
the binding site, but relatively flat near the end of channel, so any new ion
introduced would escape very quickly from the same end by diffusion,
leaving the old ion in the channel. For example, at $d=0.1$ nm, both
escaping rates $\beta_2^{-(1,1)}$ and
$\beta_2^{+(1,2)}$
are over $500$ ns$^{-1}$. Thus the channel has only
one ion ($\widetilde{P}_1 > 0.8$) most of the time; obviously, the process of
an ion entering and leaving from the same end
does not generate any through flux. On the other hand, when $d$ is
large, the potential well is very broad and flat near the binding site,
and two ions can hardly escape from the channel, as shown by the large
mean escape time in Fig. \ref{fig:tau2_d}. Once a new ion is introduced
to the channel,
it quickly moves towards the center, and settles into a balanced
state in the well with the other ion; thus the $2$-ion state dominates
($\widetilde{P}_2
> 0.9$).
In this case the flux is small because two ions are trapped in the
channel for a long time.
When neither $2$-ion nor $1$-ion occupancy dominates in the channel,
so that there are adequate transitions between the $2$-ion distinct states and
frequent escapes from the $2$-ion to the $1$-ion state, a large flux is
generated. This explains heuristically why an intermediate potential well has an
optimal geometry.
Finally we investigate how the entry rates affect the optimal flux for
the family of potential wells in
Fig. \ref{fig:pot_group_1well:a} using the four-state Markov chain
formulation (\ref{fourstate}).
Recall that the escape rates $\beta_2^{-(1,1)}$, $\beta_2^{+(1,2)}$ and transition
rates $\gamma_2^{(2,1)}$, $\gamma_2^{(1,2)}$ are determined by the
potential through mean escape time and left splitting probability, and
thus are independent of the entry rates. We fix the applied field
$U=-0.05$ V nm$^{-1}$
and plot in Fig. \ref{fig:Prob_d_vary_mu} the stationary probabilities
$\widetilde{P}_2$ (black) and $\widetilde{P}_1$ (red) for $\lambda=1,
5, 20, 100$ ns$^{-1}$.
The intersection points of each pair of curves at which
$\widetilde{P}_2 = \widetilde{P}_1 \approx 0.5$ for $\lambda=1, 5, 20,
100$ are at $d \approx 0.665, 0.6, 0.54, 0.41$ respectively. This
illustrates the fact that when the potential well is narrow and steep
at the binding site ($d$ small), the escaping rates $\beta_2^{-(1,1)}$
and $\beta_2^{+(1,2)}$ are large (shown in
Fig. \ref{fig:beta_1well}), so the entry rates have to increase in
order to have equal probabilities of $2$-ion and $1$-ion occupancy.
In Fig. \ref{fig:I_d_vary_mu}, we plot the current $I(d)$ for
$\lambda=1, 5, 20, 100$ ns$^{-1}$. As expected, the current increases
as the entry rates increase, but we also find that the value of $d$
at which the current is optimised shifts;
the critical values of $d$ at which optimal flux is achieved are
respectively $d \approx 0.63, 0.6, 0.59, 0.57$. Thus the optimal
value of $d$ slightly decreases as the entry rates increase, which
shows that the larger
escaping rates of tighter potentials require larger entry rates to
optimize the flux.
\begin{figure}[t]
\centering
\subfigure[]
{ \label{fig:Prob_d_vary_mu}
\includegraphics[width=2.45in]{Figure5_5a}}
\subfigure[]
{ \label{fig:I_d_vary_mu}
\includegraphics[width=2.45in]{Figure5_5b}}
\caption{Fixing $U=-0.05$ V nm$^{-1}$.
{\rm (a)} We plot stationary probability $\widetilde{P}_2$ (black) and $\widetilde{P}_1$ (red)
obtained from four-state Markov Chain formulation $(\ref{fourstate})$ for $\lambda=1, 5, 20, 100$ ns$^{-1}$.
The intersection points of each pair of curves at which $\widetilde{P}_2 = \widetilde{P}_1 \approx 0.5$
for $\lambda=1, 5, 20, 100$ are at $d \approx 0.665, 0.6, 0.54, 0.41$ nm, respectively.
{\rm (b)} We plot current obtained from four-state Markov Chain formulation for $\lambda=1, 5, 20, 100$ ns$^{-1}$.
The critical values of $d$ at which optimal flux is achieved are respectively
$d \approx 0.63, 0.6, 0.59, 0.57$ nm.
}
\end{figure}
\section{Conclusion} \label{sec:conclusion}
We have presented a set of hierarchical Fokker-Planck equations
describing ion permeation in multi-ion channels, and reduced these
systematically to
a discrete rate theory. The basis of the reduction is the fact that
many channels have internal binding sites at which ions sit,
so that ions transport by hopping between sites on a slow time scale
while oscillating
in the binding sites on a fast time scale. Since the fast oscillation is
not key in determining the conduction rate, we can
reduce the continuous dynamics to the slow transition between the
discrete states,
and thus provide an efficient way to calculate the current through
the channel.
A key component of our reduction was the use of exit times and
splitting probabilities to determine the discrete hopping rates,
rather than trying to estimate these directly using Kramers' theory for
example. This means that the predictions of the discrete model are
accurate even when the internal states are not so well defined.
In contrast to traditional Eyring rate theory \cite{Eyring:1935:ACA} and
the recent study of a one-ion channel in \cite{Abad:2009:NRT}, we have
developed a general theory
for multi-ion channels, and have shown an intricate coupling between
transition rates, mean escape time and splitting probability, due to
the complexity of
the resulting system of Markovian states. The theory is illustrated by
a two-ion channel, which is the most accessible example that
includes the multi-ion complexity. We have investigated how
conductivity of the channel depends on the diffusion coefficient,
potential energy landscape,
and the ion entry rate. By
varying the geometry of the external potential while keeping the
depth fixed, we observed that
when the potential well is narrow and steep at the binding site, the
$1$-ion state dominates, but when it is not the $2$-ion state
dominates.
In between there is an optimal geometry which maximizes the ion flux by
negotiating between these two extremes and allowing
frequent transitions between the $1$-ion and $2$-ion states.
\vskip 5mm
\noindent
{\bf Acknowledgements.}
This publication was based on work supported by Award
No KUK-C1-013-04, made by King Abdullah University of Science
and Technology (KAUST). The research leading to these results has received
funding from the {\it European Research Council} under the
{\it European Community's} Seventh Framework Programme
({\it FP7/2007-2013})/ ERC grant agreement No. 239870.
Radek Erban would also like to thank
Somerville College, University of Oxford, for a Fulford Junior
Research Fellowship; Brasenose College, University of Oxford, for a
Nicholas Kurti Junior Fellowship; the Royal Society for a University
Research Fellowship; and the Leverhulme Trust for a Philip
Leverhulme Prize.
\newpage
\bibliographystyle{siam}
|
{
"timestamp": "2012-06-29T02:01:37",
"yymm": "1206",
"arxiv_id": "1206.6562",
"language": "en",
"url": "https://arxiv.org/abs/1206.6562"
}
|
\section{Introduction}
Two main techniques for proving lower bounds on quantum query complexity are the polynomial method~\cite{beals:polynomial} developed by Beals {\em et al.} in 1998, and the adversary method~\cite{ambainis:adversary} developed by Ambainis in 2000. Both techniques are incomparable. There are functions with adversary bound strictly larger than polynomial degree~\cite{ambainis:polynomialVsQCC}, as well as functions with the reverse relation.
One of the examples of the reverse relation is exhibited by the element distinctness function. The input to the function is a string of length $n$ of symbols in an alphabet of size $q$, i.e., $x = (x_i)\in [q]^n$. We use notation $[q]$ to denote the set $\{1,\dots,q\}$. The element distinctness function evaluates to 0 if all symbols in the input string are pairwise distinct, and to 1 otherwise.
The quantum query complexity of element distinctness is $O(n^{2/3})$ with the algorithm given by Ambainis~\cite{ambainis:distinctness}. The tight lower bounds were given by Aaronson and Shi~\cite{shi:collisionLower}, Kutin~\cite{kutin:collisionLower} and Ambainis~\cite{ambainis:collisionLower} using the polynomial method.
The adversary bound, however, fails for this function. The reason is that the function has 1-certificate complexity 2, and the so-called certificate complexity barrier~\cite{spalek:adversaryEquivalent, zhang:adversaryPower} implies that for any function with 1-certificate complexity bounded by a constant, the adversary method fails to achieve anything better than $\Omega(\sqrt{n})$.
In 2006 a stronger version of the adversary bound was developed by H\o yer {\em et al.}~\cite{hoyer:adversaryNegative}. This is the negative-weight adversary lower bound defined in~\refsec{def}. Later it was proved to be optimal by Reichardt {\em et al.}~\cite{reichardt:adversaryTight, lee:stateConversion}. Although the negative-weight adversary lower bound is known to be tight, it has almost never been used to prove lower bounds for explicit functions. The vast majority of lower bounds by the adversary method used the old positive-weight version of this method. But since the only competing polynomial method is known to be non-tight, a better understanding of the negative-weight adversary method would be very beneficial. In the sequel, we consider the negative-weight adversary bound only, and we will omit the adjective ``negative-weight''.
In this paper we use the adversary method to prove a lower bound for the following variant of the knapsack packing problem. Let $\mathbb{G}$ be a finite Abelian group, and $t\in \mathbb{G}$ be its arbitrary element. For a positive integer $k$, the {\em $k$-sum problem} consists in deciding whether the input string $x_1,\dots,x_n\in \mathbb{G}$ contains a subset of $k$ elements that sums up to $t$. We assume that $k$ is an arbitrary but fixed constant. The main result of the paper is the following.
\begin{theorem}
\label{thm:ksum}
For a fixed $k$, the quantum query complexity of the $k$-sum problem is $\Omega(n^{k/(k+1)})$ provided that $|\mathbb{G}|\ge n^k$.
\end{theorem}
Clearly, the 1-certificate complexity of the $k$-sum problem is $k$, hence, it is also subject to the certificate complexity barrier.
The result of \refthm{ksum} is tight thanks to the quantum algorithm based on quantum walks on the Johnson graph \cite{ambainis:distinctness}. This algorithm was first designed to solve the $k$-distinctness problem. This problem asks for detecting whether the input string $x\in [q]^n$ contains $k$ elements that are all equal. Soon enough it was realized that the same algorithm works for any function with 1-certificate complexity $k$~\cite{childs:subsetFinding}, in particular, for the $k$-sum problem. The question of the tightness of this algorithm remained open for a long time. It was known to be tight for $k=2$ due to the lower bound for the element distinctness problem. Now we know that it is not optimal for the $k$-distinctness problem if $k>2$~\cite{belovs:learningKDist}. However, \refthm{ksum} shows that, for every $k$, quantum walk on the Johnson graph is optimal for some functions with 1-certificate complexity $k$. Finally, we note that the $k$-sum problem is also interesting because of its applications in quantum Merkle puzzles~\cite{brassard:merkle, kalach:personal}.
Actually, we get \refthm{ksum} as a special case of a more general result we are about to describe. The following is a special case of a well-studied combinatorial object:
\begin{definition}\label{def:orthogonalArray}
Assume $T$ is a subset of $[q]^k$ of size $q^{k-1}$. We say that $T$ is an \emph{orthogonal array of length $k$} iff, for every index $i \in [k]$ and for every vector $x_1,\dots, x_{i-1}, x_{i+1},\dots, x_k \in [q]$, there exists exactly one $x_i \in [q]$ such that $(x_1, \dots, x_k) \in T$.
\end{definition}
For $x=(x_i)\in [q]^n$ and $S\subseteq [n]$ let $x_S$ denote the projection of $x$ on $S$, i.e., the vector $(x_{s_1},\dots,x_{s_\ell})$ where $s_1,\dots,s_\ell$ are the elements of $S$ in the increasing order.
Assume each subset $S$ of $[n]$ of size $k$ is equipped with an orthogonal array $T_S$. The {\em $k$-orthogonal array} problem consists in finding an element of any of the orthogonal arrays in the input string. More precisely, the input $x\in [q]^n$ evaluates to 1 iff there exists $S\subseteq [n]$ of size $k$ such that $x_S \in T_S$.
Consider the following two examples:
\begin{example}
Let $\mathbb{G}$ be a commutative group with $q$ elements and $t\in\mathbb{G}$. $T = \{x \in \mathbb{G}^k: \sum_{i=1}^k x_i = t\}$ is an orthogonal array of length $k$. This choice corresponds to the $k$-sum problem of \refthm{ksum}.
\end{example}
\begin{example}
\label{exm:distinctness}
$T = \{x \in [q]^2: x_1=x_2\}$ is an orthogonal array of length 2. This corresponds to the element distinctness problem from~\cite{belovs:adv-el-dist}.
\end{example}
\goodbreak
\begin{theorem}
\label{thm:orthogonal}
For a fixed $k$ and any choice of the orthogonal arrays $T_S$, the quantum query complexity of the $k$-orthogonal array problem is $\Omega(n^{k/(k+1)})$ provided that $q \ge n^k$. The constant behind big-Omega depends on $k$, but not on $n$, $q$, or the choice of $T_S$.
\end{theorem}
The orthogonal array condition specifies that even if an algorithm has queried $k-1$ elements out of any $k$-tuple, it has the same information whether this $k$-tuple is a 1-certificate as if it has queried no elements out of it. Because of this, the search for a $k$-tuple as a whole entity is the best the quantum algorithm can do. Our proof of \refthm{orthogonal} is a formalization of this intuition.
Let us elaborate on the requirement on the size of the alphabet. It is easy to see that some requirement is necessary. Indeed, the $k$-sum problem can be solved in $O(\sqrt{n})$ queries if the size of $\mathbb{G}$ is $O(1)$, using the Grover search to find up to $k$ copies of every element of $\mathbb{G}$ in the input string, and trying to construct $t$ out of what is found. In some cases, e.g., when $t$ is the identity element and $k$ equals the order of the group, the problem becomes trivial if $n$ is large enough.
The requirement on the size of the alphabet for the element distinctness problem is a subtle issue. The lower bounds by Aaronson and Shi~\cite{shi:collisionLower} and Kutin~\cite{kutin:collisionLower} require the size of the alphabet to be at least $\Omega(n^2)$ that is the same that gives \refthm{orthogonal}. However, later Ambainis~\cite{ambainis:collisionLower} showed that the lower bound remains the same even if one allows the alphabet of size $n$. Reducing the alphabet size in \refthm{orthogonal} is one of our open problems.
\section{Adversary Lower Bound}
\label{sec:def}
In the paper we are interested in the quantum query complexity of solving the orthogonal array problem. For the definitions and main properties of quantum query complexity refer to, e.g., Ref.~\cite{buhrman:querySurvey}. For the purposes of our paper, it is enough to have the definition of the adversary bound we give in this section.
Compared to the original formulation of the negative adversary bound~\cite{hoyer:adversaryNegative}, our formulation has two differences. Firstly, in order to simplify notations we call an adversary matrix a matrix with rows labeled by positive inputs, and the columns by the negative ones. It is a quarter of the original adversary matrix that completely specifies the latter. Secondly, due to technical reasons, we allow several rows to be labeled by the same positive input. All this is captured by the following definition and theorem.
\begin{definition}
\label{defn:adversary}
Let $f$ be a function $f\colon {\cal D}\to \{0,1\}$ with domain ${\cal D}\subseteq [q]^n$. Let $\mathcal{\widetilde D}$ be a set of pairs $(x,a)$ with the property that the first element of each pair belongs to ${\cal D}$, and $\mathcal{\widetilde D}_i = \{(x,a)\in \mathcal{\widetilde D} : f(x)=i\}$ for $i\in\{0,1\}$. An {\em adversary matrix} for the function $f$ is a non-zero real $\mathcal{\widetilde D}_1\times\mathcal{\widetilde D}_0$ matrix $\Gamma$. And, for $i\in[n]$, let $\Delta_i$ denote the $\mathcal{\widetilde D}_1\times \mathcal{\widetilde D}_0$ matrix defined by
\[ \Delta_i\elem{(x,a),(y,b)} = \begin{cases} 0,& x_i=y_i; \\ 1,&\text{otherwise}. \end{cases} \]
\end{definition}
\begin{theorem}[Adversary bound \cite{hoyer:adversaryNegative}] \label{thm:adv}
In the notation of Definition~\ref{defn:adversary}, $Q_2(f)=\Omega(\mathrm{Adv}^\pm(f))$, where
\begin{equation}\label{eqn:adversary}
\mathrm{Adv}^\pm(f) = \sup_{\Gamma} \frac{\norm{\Gamma}}{\max_{i\in [n]} \norm{\Gamma\circ\Delta_i} }
\end{equation}
with the maximization over all adversary matrices for $f$, $\norm{\cdot}$ is the spectral norm, and $Q_2(f)$ is the quantum query complexity of $f$.
\end{theorem}
\begin{proof}
In the original negative-weight adversary bound paper~\cite{hoyer:adversaryNegative}, Eq.~\refeqn{adversary} is proven when $\Gamma$ is a real symmetric ${\cal D}\times {\cal D}$ matrix with the property $\Gamma\elem{x,y}=0$ if $f(x)=f(y)$, and $\Delta_i$ are modified accordingly. We describe a reduction from the adversary matrix in our definition, $\Gamma$, to the adversary matrix $\Gamma'$ in the definition of~\cite{hoyer:adversaryNegative}. Also, let $\Delta'_i$ be the ${\cal D}\times{\cal D}$ matrix with $\Delta'_i\elem{x,y}=1$ if $x_i\ne y_i$, and 0, otherwise.
At first, define $\overline\Gamma$ as
\[
\overline\Gamma = \left( \begin{matrix}
0 & \Gamma^* \\
\Gamma & 0 \\
\end{matrix} \right) \enspace.
\]
Note that $\norm{\overline\Gamma} = \norm{\Gamma}$, and the spectrum of $\overline\Gamma$ is symmetric. Also, for all $i$, $\norm{\overline\Gamma\circ\overline\Delta_i} = \norm{\Gamma\circ\Delta_i}$, where $\overline\Delta_i$ is defined similarly to $\overline\Gamma$.
Let $\delta = (\delta_{x,a})$ be the normalized eigenvector of $\overline\Gamma$ corresponding to the eigenvalue $\norm{\Gamma}$. For all $x,y\in {\cal D}$, let:
\[\delta'_x = \sqrt{\sum_{a:(x,a)\in\widetilde{{\cal D}}} \delta_{x,a}^2}\qquad\mbox{and}\qquad
\Gamma'\elem{x,y} = \frac1{\delta'_x\delta'_y}\sum_{\substack{a:(x,a)\in\widetilde{{\cal D}}\\ b:(y,b)\in \widetilde{{\cal D}}}} \delta_{x,a}\delta_{y,b} \overline\Gamma\elem{(x,a),(y,b)} \enspace. \]
Then it is easy to see that $\delta' = (\delta'_{x})$ satisfies $\|\delta'\|=1$ and $(\delta')^* \Gamma'\delta' = \delta^*\overline \Gamma \delta = \norm{\Gamma}$, hence, $\|\Gamma'\|\ge \|\Gamma\|$.
And vice versa, if ${\varepsilon}'$ is such that $\|{\varepsilon}'\|=1$ and $({\varepsilon}')^* (\Gamma'\circ \Delta'_i){\varepsilon}' = \|\Gamma'\circ \Delta'_i\|$, let ${\varepsilon}_{x,a} = \delta_{x,a} {\varepsilon}'_x/\delta'_x$. Again, $\|{\varepsilon}\|=1$ and ${\varepsilon}^*(\overline\Gamma\circ\overline\Delta_i){\varepsilon} = ({\varepsilon}')^*(\Gamma'\circ\Delta'_i){\varepsilon}'$, hence, $\|\Gamma'\circ \Delta'_i\| \le \|\overline\Gamma\circ \overline \Delta_i\| = \norm{\Gamma\circ\Delta_i}$.
This means that $\Gamma'$ provides an adversary lower bound at least as good as the one provided by $\Gamma$.
\end{proof}
\section{Proof}
In this section we prove \refthm{orthogonal} using the adversary lower bound, \refthm{adv}. The idea of our construction is to embed the adversary matrix $\Gamma$ into a slightly larger matrix ${\widetilde{\Gamma}}$ with additional columns. Then $\Gamma\circ \Delta_i$ is a submatrix of ${\widetilde{\Gamma}}\circ \Delta_i$, hence, $\|\Gamma\circ \Delta_i\|\le \|{\widetilde{\Gamma}}\circ \Delta_i\|$. (In this section we use $\Delta_i$ to denote all matrices defined like in Definition~\ref{defn:adversary}, with the size and the labels of the rows and columns clear from the context.) It remains to prove that $\|{\widetilde{\Gamma}}\|$ is large, and that $\|\Gamma\|$ is not much smaller than $\|{\widetilde{\Gamma}}\|$.
The proof is organized as follows. In \refsec{tGamma} we define ${\widetilde{\Gamma}}$ in dependence on parameters $\alpha_m$, in \refsec{normtGamma} we analyze its norm, in Sections~\ref{sec:Delta1} and~\ref{sec:tGamma1Norm} we calculate $\norm{{\widetilde{\Gamma}}\circ\Delta_i}$, in \refsec{alpha} we optimize $\alpha_m$s, and, finally, in \refsec{Gamma} we prove that the norm of the true adversary matrix $\Gamma$ is not much smaller than the norm of ${\widetilde{\Gamma}}$.
\subsection{Adversary matrix}
\label{sec:tGamma}
Matrix ${\widetilde{\Gamma}}$ consists of $\binom n k$ matrices ${\widetilde{G}}_{s_1,\dots,s_k}$ stacked one on another for all possible choices of $S=\{s_1,\dots,s_k\}\subset[n]$:
\begin{equation}\label{eqn:tGamma}
{\widetilde{\Gamma}} = \left(
\begin{array}{c} {\widetilde{G}}_{1,2,\dots,k} \\ {\widetilde{G}}_{1,2,\dots,k-1,k+1} \\ \dots \\ {\widetilde{G}}_{n-k+1,n-k+2,\dots,n} \\ \end{array}
\right) \enspace.
\end{equation}
Each ${\widetilde{G}}_S$ is a $q^{n-1} \times q^n$ matrix with rows indexed by inputs $(x_1, \dots, x_n) \in [q]^n$ such that $x_S \in T_S$, and columns indexed by all possible inputs $(y_1, \dots, y_n) \in [q]^n$.
We say a column with index $y$ is {\em illegal} if $y_S\in T_S$ for some $k$-subset $S\subset[n]$. After removing all illegal columns, ${\widetilde{G}}_S$ will represent the part of $\Gamma$ with the rows indexed by the inputs having an element of the orthogonal array on $S$. Note that some positive inputs appear more than once in $\Gamma$. More specifically, an input $x$ appears as many times as the number of elements of the orthogonal arrays it contains.
This construction may seem faulty, because there are elements of $[q]^n$ that are used as labels of both rows and columns in ${\widetilde{\Gamma}}$, and hence, it is trivial to construct a matrix ${\widetilde{\Gamma}}$ such that the value in~\refeqn{adversary} is arbitrarily large. But we design ${\widetilde{\Gamma}}$ in a specifically restrictive way so that it still is a good adversary matrix after the illegal columns are removed.
Let $J_q$ be the $q\times q$ all-ones matrix. Assume $e_0,\dots,e_{q-1}$ is an orthonormal eigenbasis of $J_q$ with $e_0=1/\sqrt{q}(1,\dots,1)$ being the eigenvalue $q$ eigenvector. Consider the vectors of the following form:
\begin{equation}
\label{eqn:v}
v = e_{v_1}\otimes e_{v_2}\otimes\cdots\otimes e_{v_n} \enspace,
\end{equation}
where $v_i \in \{0,\dots,q-1\}$. These are eigenvectors of the Hamming Association Scheme on $[q]^n$. For a vector $v$ from~\refeqn{v}, the {\em weight} $|v|$ is defined as the number of non-zero entries in $(v_1,\dots,v_n)$. Let $E_k^{(n)}$, for $k=0,\dots,n$, be the orthogonal projector on the space spanned by the vectors from~\refeqn{v} having weight $k$. These are the projectors on the eigenspaces of the association scheme. Let us denote $E_i = E^{(1)}_i$ for $i=0,1$. These are $q\times q$ matrices. All entries of $E_0$ are equal to $1/q$, and the entries of $E_1$ are given by
\[
E_1\elem{x,y} = \begin{cases}
1-1/q,& x=y;\\
-1/q,& x\ne y.
\end{cases}
\]
Elements of $S$ in ${\widetilde{G}}_S$ should be treated differently from the rest of the elements. For them, we define a $q^{k-1}\times q^k$ matrix $F_S$. It has rows labeled by the elements of $T_S$ and columns by the elements of $[q]^k$, and is defined as follows.
\begin{definition}\label{def:F}
Let
\[E^{(k)}_{<k} = I - E^{(k)}_k = \sum_{i=0}^{k-1} E^{(k)}_i = \sum_{\substack{u = e_{u_1} \otimes \cdots \otimes e_{u_k}\\ |u|<k}} uu^*\]
be the projector onto the subspace spanned by the vectors of less than maximal weight. Let $F_S$ be $\sqrt q$ times the sub-matrix of $E^{(k)}_{<k}$ consisting of only the rows from $T_S$.
\end{definition}
Finally, we define ${\widetilde{\Gamma}}$ as in~\refeqn{tGamma} with ${\widetilde{G}}_S$ defined by
\begin{equation}
\label{eqn:GT}
{\widetilde{G}}_S = \sum_{m=0}^{n-k} \alpha_m F_S \otimes E^{(n-k)}_m \enspace,
\end{equation}
where $F_S$ acts on the elements in $S$ and $E_m$ acts on the remaining $n-k$ elements. Coefficients $\alpha_m$ will be specified later.
\subsection{Norm of ${\widetilde{\Gamma}}$}
\label{sec:normtGamma}
\begin{lemma}\label{lem:normGamma}
Let ${\widetilde{\Gamma}}$ be like in~\refeqn{tGamma} with ${\widetilde{G}}_S$ defined like in~\refeqn{GT}. Then
\begin{itemize}
\item[(a)] $\|{\widetilde{\Gamma}}\| = \Omega(\alpha_0 n^{k/2})$,
\item[(b)] $\|{\widetilde{\Gamma}}\| = O(\max_m \alpha_m n^{k/2})$.
\end{itemize}
\end{lemma}
\pfstart
Fix a subset $S$ and denote $T=T_S$ and $F=F_S$. Recall that $E^{(k)}_{<k}$ is the sum of $uu^*$ over all $u=e_{u_1}\otimes\cdots\otimes e_{u_k}$ with at least one $u_j$ equal to 0, and $F$ is the restriction of $E^{(k)}_{<k}$ to the rows in $T$.
For $u=e_{u_1}\otimes\cdots\otimes e_{u_k}$ and $\ell$ such that $u_\ell = 0$, let $u^{(\ell)}$ denote the $\sqrt{q}$ multiple of $u$ restricted to the elements in $T$. The reason for the superscript is that we consider the following process of obtaining $u^{(\ell)}$: we treat $T$ as $[q]^{k-1}$ by erasing the $\ell$-th element in any string of $T$, then $u^{(\ell)}$ coincides on this set with $u$ with the $\ell$-th term removed.
In this notation, the contribution from $uu^*$ to $F$ equals $u^{(\ell_u)}u^*$, where $\ell_u$ is any position in $u$ containing $e_0$. In general, we do not know how $u^{(\ell)}$s relate for different $\ell$. However, we know that, for a fixed $\ell$, they are all orthogonal; and for any $\ell$, $(e_0^{\otimes k})^{(\ell)}$ is the vector $1/\sqrt{q^{k-1}}(1,\dots,1)$.
Let us start with proving (a). We estimate $\|{\widetilde{\Gamma}}\|$ from below as $w^*{\widetilde{\Gamma}} w'$, where $w$ and $w'$ are unit vectors with all elements equal. In other words, $\|{\widetilde{\Gamma}}\|$ is at least the sum of all its entries divided by $\sqrt{\binom n k q^{2n-1}}$. In order to estimate the sum of the entries of ${\widetilde{\Gamma}}$, we rewrite~\refeqn{GT} as
\begin{equation}
\label{eqn:GT2}
{\widetilde{G}}_S = \alpha_0 e_0^{\otimes(n-1)}(e_0^{\otimes n})^* + {\sum_{u,v}} \alpha_{|v|} (u^{(\ell_u)}\otimes v)(u\otimes v)^* \enspace,
\end{equation}
where the summation is over all $u$ and $v$ such that at least one of them contains an element different from $e_0$. The sum of all entries in the first term of~\refeqn{GT2} is $\alpha_0 q^{n-1/2}$. The sum of each column in each of $(u^{(\ell_u)}\otimes v)(u\otimes v)^*$ is zero because at least one of $u^{(\ell_u)}$ or $v$ sums up to zero. By summing over all $\binom n k$ choices of $S$, we get that $\norm{{\widetilde{\Gamma}}} \ge \alpha_0\sqrt{\binom n k} = \Omega(\alpha_0 n^{k/2})$.
In order to prove (b), express $F_S$ as $\sum_{\ell=1}^k F^{(\ell)}_S$ with $F^{(\ell)}_S = \sum_{u\in U_\ell} u^{(\ell)}u^*$. Here $\{U_\ell\}$ is an arbitrary decomposition of all $u$ such that $U_\ell$ contains only $u$ with $e_0$ in the $\ell$-th position. Define ${\widetilde{G}}^{(\ell)}_S$ as in~\refeqn{GT} with $F_S$ replaced by $F^{(\ell)}_S$, and ${\widetilde{\Gamma}}^{(\ell)}$ as in~\refeqn{tGamma} with ${\widetilde{G}}_S$ replaced by ${\widetilde{G}}^{(\ell)}_S$.
Since all $u^{(\ell)}$s are orthogonal for a fixed $\ell$, we get that
\[
({\widetilde{G}}^{(\ell)})^*{\widetilde{G}}^{(\ell)} = \sum_{u\in U_\ell, v} \alpha_{|v|}^2 (u\otimes v)(u\otimes v)^* \enspace,
\]
thus $\| ({\widetilde{G}}^{(\ell)})^*{\widetilde{G}}^{(\ell)} \| = \max_m \alpha_m^2$. By the triangle inequality,
\[
\|{\widetilde{\Gamma}}^{(\ell)}\|^2 = \left\|({\widetilde{\Gamma}}^{(\ell)})^*{\widetilde{\Gamma}}^{(\ell)}\right\| = \left\|\sum_S ({\widetilde{G}}_S^{(\ell)})^*{\widetilde{G}}_S^{(\ell)} \right\| \le
\binom n k \max_m \alpha_m^2 \enspace.
\]
Since ${\widetilde{\Gamma}} = \sum_{\ell=1}^k {\widetilde{\Gamma}}^{(\ell)}$, another application of the triangle inequality finishes the proof of (b).
\pfend
\subsection{Action of $\Delta_1$}
\label{sec:Delta1}
The adversary matrix is symmetric in all input variables and hence it suffices to only consider the entry-wise multiplication by $\Delta_1$. Precise calculation of $\|{\widetilde{\Gamma}} \circ \Delta_1\|$ is very tedious, but one can get an asymptotically tight bound using the following trick.
Instead of computing ${\widetilde{\Gamma}} \circ \Delta_1$ directly, we arbitrarily map ${\widetilde{\Gamma}} \overset{\Delta_1}\longmapsto {\widetilde{\Gamma}}_1$ such that ${\widetilde{\Gamma}}_1 \circ \Delta_1 = {\widetilde{\Gamma}} \circ \Delta_1$, and use the inequality $\|{\widetilde{\Gamma}}_1 \circ \Delta_1\| \le 2 \|{\widetilde{\Gamma}}_1\|$ that holds thanks to $\gamma_2(\Delta_1)\le 2$ \cite{lee:stateConversion}. In other words, we change arbitrarily the entries with $x_1 = y_1$. We use the mapping
\begin{equation}\label{eqn:E1}
E_0 \overset{\Delta_1}\longmapsto E_0 \enspace, \qquad E_1 \overset{\Delta_1}\longmapsto -E_0 \enspace, \qquad I \overset{\Delta_1}\longmapsto 0 \enspace.
\end{equation}
The projector $E^{(k)}_{<k}$ is mapped by $\Delta_1$ as
\[
E^{(k)}_{<k} = I - E^{(k)}_k \overset{\Delta_1}\longmapsto E_0 \otimes E_1^{\otimes (k-1)} \enspace.
\]
It follows that
\begin{equation}\label{eqn:F1}
F \overset{\Delta_1}\longmapsto e_0^*\otimes E_1^{\otimes (k-1)} = \sum_{\substack{u=e_{u_1}\otimes\cdots\otimes e_{u_k}\\ u_1=0, |u|=k-1}} u^{(1)} u^* \enspace,
\end{equation}
where $u^{(1)}$ is defined like in the proof of \reflem{normGamma}.
\subsection{Norm of ${\widetilde{\Gamma}}_1$}
\label{sec:tGamma1Norm}
\begin{lemma}\label{lem:normGamma1}
Let ${\widetilde{\Gamma}}$ be like in~\refeqn{tGamma} with ${\widetilde{G}}_S$ defined like in~\refeqn{GT}, and map ${\widetilde{\Gamma}} \overset{\Delta_1}\longmapsto {\widetilde{\Gamma}}_1$ and ${\widetilde{G}}_S \overset{\Delta_1}\longmapsto ({\widetilde{G}}_S)_1$ using \refeqn{E1} and \refeqn{F1}. Then
$\|{\widetilde{\Gamma}}_1\| = O\bigl(\max_m \max\bigl(\alpha_m m^{(k-1)/2}, (\alpha_m - \alpha_{m+1}) n^{k/2}\bigr)\bigr)$.
\end{lemma}
\pfstart
\renewcommand{\S}{\mathcal{S}}
We have $\|{\widetilde{\Gamma}}_1\|^2 = \|{\widetilde{\Gamma}}_1^* {\widetilde{\Gamma}}_1\| = \|\sum_S ({\widetilde{G}}_S)_1^* ({\widetilde{G}}_S)_1\|$.
Decompose the set of all possible $k$-tuples of indices into $\S_1 \cup \S_2$, where $\S_1$ are $k$-tuples containing 1 and $\S_2$ are $k$-tuples that don't contain 1. We upper-bound the contribution of $\S_1$ to $\|{\widetilde{\Gamma}}_1\|^2$ by $\max_m \alpha_m^2 \binom {m+k-1} {k-1}$ and the contribution of $\S_2$ by $\max_m(\alpha_m - \alpha_{m+1})^2 k \binom {n-1} k$, and apply the triangle inequality.
Let $v = e_{v_1} \otimes \cdots \otimes e_{v_n}$ with $|v| = m+k-1$, and let $S \in \S_1$. Then, by~\refeqn{F1},
\[
({\widetilde{G}}_S)_1 v = \left\{ \begin{tabular}{l l}
$\alpha_m v^{(1)}$, & $v_1=0$ and $|v_S|=k-1$, \\
0, & otherwise.
\end{tabular} \right.
\]
Here $v^{(1)}= e_{v_2} \otimes \cdots \otimes e_{v_n}$ is $v$ with the first term removed and $v_S = \bigotimes_{s\in S} e_{v_s}$.
For different $v$, these are orthogonal vectors, and hence $v$ is an eigenvector of $({\widetilde{G}}_S)_1^* ({\widetilde{G}}_S)_1$ of eigenvalue $\alpha_m^2$ if $v_1=0$ and $|v_S|=k-1$, and of eigenvalue 0 otherwise. For every $v$ with $v_1=0$ and $|v|=m+k-1$, there are $\binom {m+k-1} {k-1}$ sets $S\in \S_1$ such that $({\widetilde{G}}_S)_1 v \ne 0$. Thus, the contribution of $\S_1$ is as claimed.
Now consider an $S\in \S_2$, that means $1 \not\in S$.
\begin{eqnarray*}
{\widetilde{G}}_S &=& \sum_{m=0}^{n-k} \alpha_m F_S \otimes E^{(n-k)}_m \\
&=& \sum_{m=0}^{n-k} \alpha_m F_S \otimes (E_0 \otimes E^{(n-k-1)}_m + E_1 \otimes E^{(n-k-1)}_{m-1}) \\
&\overset{\Delta_1}\longmapsto& \sum_{m=0}^{n-k} \alpha_m F_S \otimes E_0 \otimes (E^{(n-k-1)}_m - E^{(n-k-1)}_{m-1}) \\
({\widetilde{G}}_S)_1 &=& \sum_{m=0}^{n-k} (\alpha_m - \alpha_{m+1}) F_S \otimes E_0 \otimes E^{(n-k-1)}_m \enspace.
\end{eqnarray*}
Therefore $({\widetilde{G}}_S)_1$ is of the same form as ${\widetilde{G}}_S$, but with coefficients $(\alpha_m - \alpha_{m+1})$ instead of $\alpha_m$ and on one dimension less. We get the required estimate from \reflem{normGamma}(b).
Since $k = O(1)$, we get the claimed bound.
\pfend
\subsection{Optimization of $\alpha_m$}
\label{sec:alpha}
To maximize the adversary bound, we maximize $\|{\widetilde{\Gamma}}\|$ while keeping $\|{\widetilde{\Gamma}}_1\|=O(1)$. That means, we choose the coefficients $(\alpha_m)$ to maximize $\alpha_0 n^{k/2}$ (\reflem{normGamma}) so that, for every $m$, $\alpha_m \le {m^{(1-k)/2}}$ and $\alpha_m \le \alpha_{m+1} + {n^{-k/2}}$ (\reflem{normGamma1}).
For every $r \in [n]$, $\alpha_0 \le \alpha_r + r {n^{-k/2}} \le {r^{(1-k)/2}} + r {n^{-k/2}}$.
The expression on the right-hand side attains its minimum, up to a constant factor, at $r=n^{k/(k+1)}$, which gives $\alpha_0 = 2\, n^{{k (1-k)}/ (2 (k+1))}$. This corresponds to the following solution:
\begin{equation}
\label{eqn:alphas}
\alpha_m = \max\left\{2 - \frac m {n^{k/(k+1)}}, 0\right\} n^{\frac {k (1-k)} {2 (k+1)}}
\end{equation}
With this choice of $\alpha_m$, $\|{\widetilde{\Gamma}}\| = \Omega(\alpha_0 n^{k/2}) = \Omega(n^{k/ (k+1)})$.
\subsection{Constructing $\Gamma$ from ${\widetilde{\Gamma}}$}
\label{sec:Gamma}
The matrix ${\widetilde{\Gamma}}$ gives us the desired ratio of norms of ${\widetilde{\Gamma}}$ and ${\widetilde{\Gamma}}\circ\Delta_i$. Unfortunately, ${\widetilde{\Gamma}}$ cannot directly be used as an adversary matrix, because it contains illegal columns $y$ with $f(y)=1$, that is, $y$ that contain an element of the orthogonal array on $S \subset [n]: |S|=k$, i.e., $y_S \in T_S$. We show that after removing the illegal columns it is still good enough.
\begin{lemma}
Let $\Gamma$ be the sub-matrix of ${\widetilde{\Gamma}}$ with the illegal columns removed. Then $\|\Gamma \circ \Delta_1\| \le \|{\widetilde{\Gamma}} \circ \Delta_1\|$, and $\|\Gamma\|$ is still $\Omega(\alpha_0 n^{k/2})$ when $q \ge n^k$.
\end{lemma}
\pfstart
We estimate $\|\Gamma\|$ from below by $w^* \Gamma w'$ using unit vectors $w, w'$ with all elements equal. Recall Equation \refeqn{GT2}:
\[
{\widetilde{G}}_S = \alpha_0 e_0^{\otimes(n-1)}(e_0^{\otimes n})^* + {\sum_{u,v}} \alpha_{|v|} (u^{(\ell_u)}\otimes v)(u\otimes v)^* \enspace,
\]
where the summation is over all $u$ and $v$ such that at least one of them contains an element different from $e_0$. The sum of each column in each of $(u^{(\ell_u)}\otimes v)(u\otimes v)^*$ still is zero because at least one of $u^{(\ell_u)}$ or $v$ sums up to zero. Therefore the contribution of the sum is zero regardless of which columns have been removed.
By summing over all $\binom n k$ choices of $S$, we get
\[
\|\Gamma\| \ge w^* \Gamma w' = \sqrt{\binom n k} \alpha_0\ (e_0^{\otimes n})_L^* w' \enspace,
\]
where $e_L$ denotes the sub-vector of $e$ restricted to $L$, and $L$ is the set of legal columns. Since both $e_0$ and $w'$ are unit vectors with all elements equal, and $w'$ is supported on $L$, $(e_0^{\otimes n})_L^* w' = \sqrt{|L|/q^n}$.
Let us estimate the fraction of legal columns. The probability that a uniformly random input $y \in [q]^n$ contains an orthogonal array at any given $k$-tuple $S$ is $\frac 1 q$. By the union bound, the probability that there exists such $S$ is at most $\binom n k \frac 1 q$. Therefore the probability that a random column is legal is $\frac {|L|} {q^n} \ge 1 - \binom n k \frac 1 q$, which is $\Omega(1)$ when $q \ge n^k$.
\pfend
Thus, with the choice of $(\alpha_m)$ from~\refeqn{alphas}, we have $\mathrm{Adv}^\pm(f) = \Omega(\alpha_0 n^{k/2}) = \Omega(n^{k/(k+1)})$. This finishes the proof of \refthm{orthogonal}.
\section{Open problems}
Our technique relies crucially on the $n^k$ lower bound on the alphabet size. Can one relax this bound in some special cases? For example, element distinctness is nontrivial when $q \ge n$, but our lower bound only holds for $q \ge n^2$.
A tight $\Omega(n^{2/3})$ lower bound for element distinctness was originally proved by the polynomial method \cite{shi:collisionLower} by reduction via the collision problem. The $k$-collision problem is to decide whether a given function is $1:1$ or $k:1$, provided that it is of one of the two types. One can use an algorithm for element distinctness to solve the 2-collision problem, and thus the tight $\Omega(n^{1/3})$ lower bound for collision in \cite{shi:collisionLower} implies a tight lower bound for element distinctness. Unfortunately, the reduction doesn't go in both directions and hence our result doesn't imply any nontrivial adversary bound for $k$-collision. The simpler non-negative adversary bound is limited to $O(1)$ due to the property testing barrier. Roughly speaking, if every 0-input differs from every 1-input in at least an $\varepsilon$-fraction of the input, the non-negative adversary bound is limited by $O(\frac 1 \varepsilon)$. What does an explicit negative adversary matrix for an $\omega(1)$ lower bound look like?
The recent learning graph-based algorithm for $k$-distinctness \cite{belovs:learningKDist} uses $O(n^{1 - 2^{k-2} / (2^k-1)})$ quantum queries, which is less than $O(n^{k/(k+1)})$ but more than the $\Omega(n^{2/3})$ lower bound by reduction from 2-distinctness. $k$-distinctness is easier than the $k$-sum problem considered in our paper because one can obtain nontrivial information about the solution from partial solutions, i.e., from $\ell$-tuples of equal numbers for $\ell < k$. Can one use our technique to prove an $\omega(n^{2/3})$ lower bound for $k$-distinctness?
The $k$-sum problem is very structured in the sense that all $k$-tuples of the input variables, and all possible values seen on a $(k-1)$-tuple, are equal with respect to the function. The symmetry of this problem helped us to design a symmetric adversary matrix. The nonnegative adversary bound gives nontrivial lower bounds for most problems, by simply putting most of the weight on hard-to-distinguish input pairs, regardless of whether the problem is symmetric or not. Can one use our technique to improve the best known lower bounds for some non-symmetric problems, for example, to prove an $\omega(\sqrt n)$ lower bound for graph collision, $\omega(n)$ for triangle finding, or $\omega(n^{3/2})$ for verification of matrix products?
\subsection*{Acknowledgments}
A.B. would like to thank Andris Ambainis, Troy Lee and Ansis Rosmanis for valuable discussions. We are grateful to Kassem Kalach for informing us about the applications of the $k$-sum problem in Merkle puzzles, and for pointing out some minor errors in an early version of the paper.
A.B. is supported by the European Social Fund within the project ``Support for Doctoral Studies at University of Latvia'' and by FET-Open project QCS.
\bibliographystyle{../../habbrvE}
|
{
"timestamp": "2012-08-13T02:00:12",
"yymm": "1206",
"arxiv_id": "1206.6528",
"language": "en",
"url": "https://arxiv.org/abs/1206.6528"
}
|
\section{Introduction}
\label{Introduction}
Observations of the transiting extrasolar planet \object{HD\,209458b}
in the Lyman-$\alpha$ line of atomic hydrogen (\ion{H}{i}) have revealed
that this planet is losing gas (Vidal-Madjar et al.\ 2003).
Subsequent theoretical studies indicate that atmospheric escape (so-called `evaporation')
arises from the intense stellar
X-ray and extreme ultraviolet energy input into the upper atmosphere (Lammer et al.\ 2003;
Lecavelier des Etangs et al.\ 2004; Yelle 2004), leading to moderate
escape rates
for massive hot-Jupiters, or to formation of planetary remnants when strong evaporation
implies a dramatic change in the planet mass (Lecavelier des Etangs et al.\ 2004, 2007;
Charpinet et al.\ 2011).
Despite the importance of evaporation on the fate of planets at
short orbital distances, the physics of the exospheric gas and role of the star-planet system
properties remain debated (Garc\'ia Mu\~noz 2007; Schneiter et al.\ 2007; Holmstr\"om et al.\
2008; Lecavelier des Etangs et al.\ 2008; Murray-Clay et al.\ 2009; Ben-Jaffel \& Sona
Hosseini 2010; Guo 2011). This is further compounded by the limited number of observations
(Vidal-Madjar et al.\ 2004; Ballester et al.\ 2007; Ehrenreich et al.\ 2008; Fossati et al.\
2010; Linsky et al.\ 2010), which include non-spectrally resolved Lyman-$\alpha$
observations of the exoplanet \object{HD\,189733b} (Lecavelier des Etangs et al.\ 2010). \\
\section{Observations, data analysis, and results}
\subsection{Observations}
To address these problems, we observed two transits of HD\,189733b on 6~April 2010 and
7~September 2011 with the Space Telescope Imaging Spectrograph (STIS) onboard the Hubble
Space Telescope (HST). The data consist of time-tagged observations obtained with the G140M
grating, producing time-resolved spectra from 1195 to 1248\,\AA\ with a spectral resolution of
about 20\,km\,s$^{-1}$ at 1215.6\,\AA\ (Lyman-$\alpha$) with exposure times of 1800 to 2100~seconds.
Between each consecutive
HST orbit, data acquisition is interrupted for about 3500~seconds by the Earth's
occultation. The obtained spectra show stellar emission lines of
\ion{H}{i} Lyman-$\alpha$, \ion{Si}{iii} (1206.5\,\AA), \ion{O}{v} (1218.3\,\AA)
and the \ion{N}{v} doublet (1242.8\,\AA\ and 1238.8\,\AA).
\begin{figure*}[tb]
\hbox{
\includegraphics[angle=-90,width=\columnwidth]
{aa_19363_fig1a.eps}
\includegraphics[angle=-90,width=\columnwidth]
{aa_19363_fig1b.eps}
}
\caption[]{Lyman-$\alpha$ emission line of HD\,189733 in April 2010 and September 2011. Spectra
obtained before (black) and during the transits (blue) are displayed as a function of radial velocity
relative to the star. The double-peaked profile results from a single stellar emission line absorbed
at the center by interstellar hydrogen, which produces a broad absorption feature from about -100 to
+50\,km\,s$^{-1}$. While no transit signatures are detected in 2010, two absorption regions are
detected at more than \hbox{3-$\sigma$} during the transit of 2011; these regions are plotted by gray zones.
They are seen at the top of the red wing around +80\,km\,s$^{-1}$ and, most significantly,
in the blue wing with a $\sim$100\,km\,s$^{-1}$ wide absorption around -200\,km\,s$^{-1}$.
}
\label{Lyman-alpha spectra}
\end{figure*}
For both the 2010 and 2011 observations, data were recorded during two HST orbits before
the transit, one orbit during the transit, and one orbit after the transit. We measured
the transit signature of the planetary atmosphere by comparing spectra taken during
transit to those taken before and after the transit event. For each stellar line, we
calculated transit light curves of the total emission flux and of the flux within given
wavelength ranges; the signature of the planetary atmosphere is detected as an excess
absorption during the planet's transit. Because no atmospheric signature is detected in
the \ion{Si}{iii}, \ion{O}{v}, or \ion{N}{v} lines,
hereafter we consider only the Lyman-$\alpha$ observations.
The Lyman-$\alpha$
line is the brightest stellar line in our STIS spectra from 1195 to 1248\,\AA, and has a
total flux of about 1.8$\times$10$^{-13}$\,erg\,s$^{-1}$\,cm$^{-2}$
(about 10 times brighter than for \object{HD\,209458}).
With the resulting high signal-to-noise ratio, there is no need to co-add several
observations to detect the signature of the atmosphere, which was necessary for the STIS observations
of HD\,209458b and for the observations of HD\,189733b with the Advanced Camera for
Surveys (ACS) of the HST (Vidal-Madjar et al.\ 2003; Lecavelier des Etangs et al.\ 2010).
This allows for the first time a search for temporal
variations in the physical conditions of the planetary upper atmosphere between two
observational epochs (here 17 months apart), as performed for the deeper atmosphere using
emission spectroscopy (Grillmair et al.\ 2008).
\subsection{Detection of temporal variations in the evaporating atmosphere}
The Lyman-$\alpha$ emission line from \object{HD\,189733} is spectrally resolved. At the resolution of the
G140M grating, the line is composed of two peaks separated by a deep absorption due
to interstellar atomic hydrogen (Fig.~\ref{Lyman-alpha spectra}).
In the raw data, the stellar emission line is superimposed with the geo-coronal
airglow emission from the upper atmosphere of the Earth (Vidal-Madjar et al.\ 2003).
This geo-coronal emission can be well estimated and removed in the final spectrum
using the CALSTIS data pipeline (version 2.32 of 5 November 2010). Independent
re-analysis of raw data using the same methodology as for STIS observations of
HD\,209458 (D\'esert et al.\ 2004) confirms that the airglow does not affect our
measurements. Moreover, because we used a narrow spectrograph slit of 0.1\arcsec, the
airglow contamination is limited to the central part of the Lyman-$\alpha$ line and
does not contaminate the line wings where the transit atmospheric signatures are
detected (see below).
The data of September 2011 exhibit a notably low airglow emission level.
The Lyman-$\alpha$ observations of April 2010 do not show
an atmospheric transit signature. In these data, the transit depth for the total flux of
the whole Lyman-$\alpha$ line is 2.9$\pm$1.4\%, which agrees with the 2.4\% transit depth
of the planet body alone as seen from the visible to the near-infrared
(D\'esert et al.\ 2009, 2011; Sing et al.\ 2011). In addition,
we see no excess absorption in any portion of the Lyman-$\alpha$ spectral line profile.
However, this situation strongly contrasts with the observations of September 2011,
in which we see an excess absorption in the total flux of the Lyman-$\alpha$ line
that yields a transit depth of 5.0$\pm$1.3\%.
This level is consistent with the results obtained with the non-resolved
HST/ACS spectra of 2007-2008 (Lecavelier des Etangs et al.\ 2010).
Importantly, the line profile shows two deep absorption regions at specific wavelength intervals
during the transit:
first in the blue part of the spectrum from about -230\,km\,s$^{-1}$ to -140\,km\,s$^{-1}$,
and in the red part of the spectrum from 60 to 110\,km\,s$^{-1}$.
In the blue wing of the Lyman-$\alpha$ spectrum of 2011, the most significant absorption is
visible in the range of \hbox{-230} to \hbox{-140\,km\,s$^{-1}$},
which gives an absorption depth of 14.4$\pm$3.6\%
(\hbox{4-$\sigma$} detection) corresponding to an excess absorption due to hydrogen atoms of 12.3$\pm$3.6\%.
The false-positive probability to find such an excess anywhere in the whole
searched range of -350 to -50\,km\,s$^{-1}$ is only 7\%.
In the red wing of the Lyman-$\alpha$ spectrum
of 2011, absorption is found in the range between 60 to 110\,km\,s$^{-1}$, which yields
an absorption depth of 7.7 $\pm$ 2.7\% (\hbox{3-$\sigma$} detection)
and corresponds to an excess absorption
due to hydrogen atoms of 5.5$\pm$2.7\%. The false-positive probability to find such an
excess over the whole searched range [40 to 200\,km\,s$^{-1}$] is 39\%. This high probability
shows that the absorption seen in the red wing of the spectrum may not be real and is
possibly caused by statistical noise in the data, although interestingly enough, a similar
absorption was also observed in HD\,209458b (Vidal-Madjar et al.\ 2003).
None of the excess absorption features detected in the September 2011 data are seen in
the April 2010 data. We conclude that there are significant temporal variations of the
physical conditions within the extended exosphere of this extrasolar planet between these
two epochs (Fig.~\ref{flux vs time}).
\begin{figure}[tb]
\hbox{
\includegraphics[angle=-90,width=\columnwidth]
{aa_19363_fig2.eps}
}
\caption[]{
Plot of the flux between -230 and -140\,km\,s$^{-1}$ in the blue wing of the
Lyman-$\alpha$ line as a function of time relative to the center of the planetary transit.
Vertical dashed lines
show the beginning and end of ingress and egress of the transit. The red triangular
symbols are for the 2010 observations, while the blue square symbols correspond to
observations of 2011. Horizontal error bars centered on the symbols show the duration of
the exposures in each HST orbit.
The time-tagged data allow independent sub-exposures to
be extracted within each HST orbit (not shown here),
resulting in the same transit signal within error bars.
The light curve of the planet's transit at optical
wavelengths is displayed as a solid black line. The blue dashed line shows the calculated
flux using the numerical simulation with an EUV ionizing flux 5~times the solar value, a
stellar wind of protons with a temperature $T$$\sim$10$^5$\,K, a velocity
$v$$\sim$190\,km\,s$^{-1}$ and density $n$$\sim$3$\times$10$^3$\,cm$^{-3}$ together with an
atmospheric escape rate of 10$^9$\,g\,s$^{-1}$.
}
\label{flux vs time}
\end{figure}
\subsection{Models}
In September 2011, the absorption depth and velocity range show that neutral hydrogen atoms are
present up to very high altitudes at velocities exceeding the escape velocity
of 60\,km\,s$^{-1}$; this unambiguously demonstrates that atmospheric gas must be escaping
from HD\,189733b. For HD\,209458b, the \hbox{Lyman-$\alpha$} excess absorption was
observed in the spectral range between \hbox{-130} and -50\,km\,s$^{-1}$, which is readily
explained by the stellar radiation pressure accelerating hydrogen atoms up to
\hbox{-130}\,km\,s$^{-1}$
(Lecavelier des Etangs et al.\ 2008). The case here of HD\,189733b, which shows excess
absorption at higher velocities between -230 and -140\,km\,s$^{-1}$, is
more challenging to explain. Assuming a distance of 19.3~pc and following the method
described in Ehrenreich et al.\ (2011), we estimated the interstellar medium absorption
and calculated
the Lyman-$\alpha$ emission line profile as seen from the planet. From the
extinction-corrected line profile we estimated that radiation pressure can accelerate
hydrogen atoms up to a radial velocity of -120\,km\,s$^{-1}$ in the exosphere of this planet
(below this radial velocity the stellar flux at the corresponding wavelength in the core
of the emission line is sufficiently high for the radiation pressure to exceed the stellar
gravity). Therefore, an additional acceleration mechanism beyond radiation pressure is
required to explain the high radial velocities of hydrogen measured during the transit.
Charge exchange with stellar wind protons can produce the observed high velocities
(Holmstr\"om et al.\ 2008; Ekenb\"ack et al.\ 2010).
To investigate this possibility and interpret the observed H{\sc i} light curve, we
developed a numerical Monte-Carlo simulation of the hydrogen atom dynamics.
The details of the model will be given in a forthcoming paper
(Bourrier et al.\ in preparation). In this N-body simulation, hydrogen atoms are
released from HD\,189733b's upper atmosphere and
are rapidly accelerated by the radiation pressure up to 120\,km\,s$^{-1}$ and then to higher
velocities by charge exchange with protons from the stellar wind. This dynamical model
provides the time-dependent distribution of positions and velocities of the escaping
hydrogen atoms in the cloud surrounding HD\,189733b. With this information, we calculated
the corresponding absorption over the stellar emission line and the resulting transit
light curve, which can be directly compared with the observations. We find that the
observations are well-fitted with an escape rate of neutral hydrogen of
about 10$^9$\,g\,s$^{-1}$
and a stellar wind with a temperature $T$$\sim$10$^5$\,K,
a density $n$$\sim$3$\times$10$^{3}$\,cm$^{-3}$, and a velocity $\sim$190\,km\,s$^{-1}$.
The best-fit model yields a $\chi^2$ of 13.0 for 17~degrees of freedom
for the absorption spectrum given in Fig.~\ref{Absorption vs velocity}.
The EUV flux controlling the neutral hydrogen ionizing timescale should be about
5~times the solar value to explain the moderate absorption observed after the transit of
the planet (Fig.~\ref{flux vs time}).
\begin{figure}[tb]
\hbox{
\includegraphics[angle=-90,width=\columnwidth]
{aa_19363_fig3.eps}
}
\caption[]{Plot of the relative absorption observed in the blue wing of the
Lyman-$\alpha$ stellar line (blue histogram) for the transit of September 7, 2011. The
dashed line shows the model with radiation pressure only; in this case, there is no
absorption at radial velocities below $\sim$-120\,km\,s$^{-1}$, resulting in a large
$\chi^2$ of 22.8. If a stellar wind and charge exchange are considered, hydrogen atoms can
be accelerated to the higher observed velocities. The model with an escape rate of
10$^9$\,g\,s$^{-1}$ (solid line) gives a $\chi^2$ of 13.0 for 17 degrees of freedom.
}
\label{Absorption vs velocity}
\end{figure}
\begin{figure*}[tb]
\includegraphics[angle=0,width=0.78\textwidth]{aa_19363_fig4.ps}
\caption[]{Swift XRT X-ray light curve of HD\,189733 about the time of the September 7,
2011 transit.
The data were binned into one point per snapshot visit, with typical
exposure times of about 27~minutes. Vertical dashed lines show the beginning and end of
ingress and egress of the transit. A bright flare occurred about 8 hours before the
transit. The observed average count rate during the flare snapshot is a factor 3.6 higher
than the mean for the whole light curve, indicating a peak X-ray flux (0.3-3\,keV) of at least 1.3$\times$10$^{-12}$\,erg\,s$^{-1}$\,cm$^{-2}$. For comparison,
the right panel shows the distribution for 63~epochs of Swift measurements
(including the 16 obtained in September 2011), covering a wide range of timescales.
The flare occurring shortly before
the transit is the highest X-ray flux of all 63~measurements.
}
\label{X-ray}
\end{figure*}
\section{Swift X-ray simultaneous observations}
The evaporation of hot Jupiters is driven by the X-ray/EUV irradiation of the
planet by its parent star. To quantify the level of X-ray
irradiation at the time of our observations in September 2011, we obtained
contemporaneous observations with the X-ray telescope (XRT) of the Swift
spacecraft (Gehrels et al.\ 2004).
A source was detected toward HD\,189733 with a mean count
rate of 0.0119$\pm$0.0007\,s$^{-1}$.
Consistent with previous XMM-Newton observations (Pillitteri et al.\ 2010),
we did not detect
the M-star binary companion, \object{HD\,189733B}, but we did find evidence for a very weak
hard X-ray source located about 13\arcsec\ south of \object{HD\,189733A}.
The Swift XRT spectrum of HD\,189733 can be fitted with a multi-temperature optically-thin
thermal plasma model (Mewe et al.\ 1985; Liedahl et al.\ 1995) that is
typical of the coronal X-ray emission from active stars. Using a three-temperature
fit (temperatures of 0.12, 0.46 and 4.5\,keV) we found an average X-ray flux
in the 0.3-3\,keV band of 3.6$\times$10$^{-13}$\,erg\,s$^{-1}$\,cm$^{-2}$,
consistent with
XMM-Newton observations of HD\,189733 (Pillitteri et al.\ 2011).
This flux corresponds to a
planetary irradiation rate of 1.2$\times$10$^{24}$\,erg\,s$^{-1}$,
which could drive a mass loss rate of up to 1.0$\times$10$^{11}$\,g\,s$^{-1}$
(assuming 100\% evaporation efficiency and taking evaporation enhancement by tidal
forcing into account; Lecavelier des Etangs et al.\ 2007; Erkaev et al.\ 2009).
Assuming a realistic emission measure distribution (Sanz-Forcada et al.\ 2011),
we can estimate the total luminosity across the X-ray/EUV band at the time of our
observation to be 7.1$\times$10$^{28}$\,erg\,s$^{-1}$,
corresponding to an energy-limited evaporation rate of
4.4$\times$10$^{11}$\,g\,s$^{-1}$.
The X-ray irradiation is consistent with the estimated
escape rate, which would thus require around 1\% efficiency
in the conversion of input energy into mass loss (Ehrenreich \& D\'esert 2011).
However, the estimated escape rate of neutral hydrogen
atoms of about 10$^{9}$\,g\,s$^{-1}$ does not include the escape of ionized hydrogen
at the exobase of the atmosphere, and it is therefore only a lower limit for the net
escape from HD\,189733b.
The Swift XRT light curve of HD\,189733 (Fig.~\ref{X-ray}) shows that the star exhibits
significant X-ray variability, and most notably, a bright flare that occurred
about 8~hours before the planetary transit. This flare could explain the
observed variations in the extended cloud of high-velocity hydrogen atoms
escaping the planet, because this could affect the properties of the stellar wind
needed to accelerate the atoms to the observed radial velocities. Moreover,
the enhanced X-ray/EUV irradiation associated with this flare
must lead to a significantly enhanced escape rate. With our best-model
parameters, an enhanced escape rate leads to a more extended exospheric hydrogen
cloud and thus to a stronger absorption after about one hour; the absorption
remains at a high level during a typical ionization timescale, which is
constrained from the post-transit Lyman-$\alpha$ observations to be about 5 hours.
Therefore, an X-ray flare occurring 8~hours earlier is expected to lead to
a higher escape rate that is then detectable in Lyman-$\alpha$.
\section{Conclusion}
Whether they are related to the observed X-ray flare or not, the
temporal variations in the
evaporating atmosphere of HD\,189733b are clearly detected in Lyman-$\alpha$.
The variability of the neutral hydrogen cloud around HD\,189733b can explain the
high dispersion of absorption depth measurements in spectrally non-resolved
Lyman-$\alpha$ observations (Lecavelier des Etangs et al.\ 2010);
combining this with the present high
signal-to-noise ratio spectrally resolved observations, we conclude that
escape signatures are detected in about half of the total five transits
observed in Lyman-$\alpha$. More simultaneous X-ray and Lyman-$\alpha$ observations
are needed to obtain a better picture of the complex relationship between the
stellar energetic input to the planet and the atmosphere's response to it,
and to constrain theoretical
models of a space weather event on hot Jupiters (e.g., Cohen et al.\ 2011).
The HD\,189733 system appears to be the target of choice, but future observations
should also enlarge the diversity of stellar and planetary system properties
to better distinguish the effects of the stellar-planet interactions from the
intrinsic variability in the observed atmospheres.
\begin{acknowledgements}
Based on observations made with the NASA/ESA Hubble Space Telescope, obtained at the Space Telescope Science Institute, which is operated by the Association of Universities for Research in Astronomy, Inc., under NASA contract NAS 5-26555.
This research has made use of data obtained from NASA's Swift satellite.
G.E.B.\ acknowledges financial support by this program through STScI grant
HST-GO-11673.01-A to the University of Arizona.
These observations are associated with program \#11673. This work has been supported by an award from the {\it Fondation Simone et Cino Del Duca}.
\end{acknowledgements}
|
{
"timestamp": "2012-06-28T02:03:54",
"yymm": "1206",
"arxiv_id": "1206.6274",
"language": "en",
"url": "https://arxiv.org/abs/1206.6274"
}
|
\section{Introduction}
The disk-halo interface is crucial for understanding the evolution of spiral galaxies. Star formation drives vertical hot gas flows in a process which is described by the galactic fountain \citep{shapiro_field_1976,bregman_1980} or chimney model \citep{norman_ikeuchi_1989}. In both models, the outflowing hot gas eventually condenses into cool clouds that fall back down onto the disk on timescales of tens of Myr. These returning clouds are thought to make up at least a part of the population of high and intermediate velocity clouds (HVCs and IVCs). The returning cool gas flow must be interacting with the ambient disk-halo medium, as is indicated by the HVC and IVC morphology \citep{bruens_etal_2000,heitsch_putman_2009} as well as the kinematics of thick disks in external galaxies and the Milky Way \citep{heald_etal_2007,fraternali_binney_2008,kalberla_kerp_2009}. Magnetic fields appear to be essential in reproducing the structure and evolution of the multiphase ISM \citep[e.g.,][]{avillez_breitschwerdt_2005,hill_etal_2012} and the longevity of infalling clouds \citep{santillan_etal_2004}.
NGC~6946 is a nearby \citep[$D=6.8\,\mathrm{Mpc}$;][]{karachentsev_etal_2000} grand-design spiral galaxy that has been studied at a broad range of wavelengths. It has a substantial integrated star formation rate \citep[$2.8\,M_\odot\,\mathrm{yr}^{-1}$;][]{calzetti_etal_2010}, together with a large number of catalogued holes in the \textsc{H\,i}\ density distribution \citep{boomsma_etal_2008} that have sizes, masses, and energetics consistent with formation by localized pockets of star formation activity. For many of these features, the stars which provided the required energy are not distinctly visible, but this is typical of \textsc{H\,i}\ holes \citep[e.g.,][]{brinks_bajaja_1986,bagetakos_etal_2011,warren_etal_2011}. The properties of the magnetic field ($\vec{B}$) in NGC~6946 have also been studied in detail, showing that its energy density is comparable to other ISM components, and perhaps even dominates at large galactocentric radii \citep{beck_2007}. The large-scale magnetic field has been probed using polarized synchrotron radiation and its Faraday rotation measure ($\mathrm{RM}\propto\int\,n_e\vec{B}\cdot\mathrm{d}\vec{l}$ where $n_e$ is the thermal electron density and $\vec{l}$ is the line of sight), both of which trace the ordered field. Faraday RM is a sensitive tracer of the line of sight magnetic field, and is robustly measured using the RM Synthesis technique \citep{brentjens_debruyn_2005,heald_etal_2009}. As in other spirals, NGC~6946 has an axisymmetric spiral pattern in the disk combined with a quadrupolar poloidal component that becomes dominant at large vertical distances from the disk \citep{braun_etal_2010}. High-quality \textsc{H\,i}\ line data \citep{boomsma_etal_2008} and radio continuum polarization data from a combined dataset spanning $1300-1432$ and $1631-1763$ MHz \citep{heald_etal_2009} are available in the literature.
\section{A magnetized \textsc{H\,i}\ bubble}
The image of RM \citep{heald_etal_2009} corresponding to the widespread diffuse polarized synchrotron radiation in the disk of NGC~6946 is displayed in Figure~\ref{fig:rmhole}, and reveals a remarkable feature at $(\alpha,\delta)_{\mathrm{J2000.0}}\,=\,(20^h35^m18^s.2,60^\circ06^\prime18^{\prime\prime})$. There, a clear RM gradient is co-located with one of the \textsc{H\,i}\ holes (see also Figure~\ref{fig:grid}) previously identified in the literature \citep{boomsma_etal_2008}. The RM at that location ranges from $18.8-57.0\,\mathrm{rad\,m^{-2}}$. The midpoint of the RM gradient is close to the typical RM value in the surrounding area of the disk, about $40\,\mathrm{rad\,m^{-2}}$. The RM gradient is on a scale of $17^{\prime\prime}$, or a linear size of 0.6 kpc (somewhat smaller than the \textsc{H\,i}\ hole itself, which is 0.9 kpc in diameter from rim to rim). The position angle of the gradient is about $225^\circ$, similar to the local orientation of the ordered magnetic field as can be clearly seen in Figure \ref{fig:rmhole}. By measuring the typical fluctuations in the RM values on the same angular scale throughout the rest of the galaxy, this particular feature is found to be significant (i.e., distinguishable from a noise feature) at the $4\sigma$ level. Here, $\sigma$ includes both noise and contributions from real fluctuations in $\vec{B}$ and $n_e$ on 0.6 kpc scales throughout the star forming disk. Very few RM fluctuations with similar magnitude are found, and the others are preferentially at the edge of the polarized disk with low confidence. We can also assess the probability of a chance association of this resolved RM feature with an \textsc{H\,i}\ hole. This is done by computing the density of \textsc{H\,i}\ hole centers within the regions with well-detected diffuse polarized emission, taking into account the quoted positional uncertainties of $4\arcsec\times4\arcsec$ \citep{boomsma_2007}. 
In this way, we estimate that the chance of a spurious spatial association of the RM feature and the \textsc{H\,i}\ hole is approximately 0.5\%.
\begin{figure}
\centering
\includegraphics[width=0.9\textwidth]{fig1}
\caption{RM image of NGC~6946 from \citet{heald_etal_2009}. Magnetic field vectors are shown with lines. The \textsc{H\,i}\ hole catalogued by \citet{boomsma_etal_2008} and found to be colocated with the RM gradient is indicated with a black ellipse.}
\label{fig:rmhole}
\end{figure}
\begin{figure*}
\centering
\includegraphics[width=0.95\textwidth]{fig2}
\caption{Images of NGC~6946. All panels are presented on the same angular scale and indicate the locations of the \textsc{H\,i}\ holes catalogued by \citet{boomsma_etal_2008} with black ellipses. Insets show the immediate vicinity of the feature described in the text. Top left: \textsc{H\,i}\ column density. The blue line shows the slice used to create the PV diagram in Figure~\ref{fig:pv}. Top right: RM image. Bottom left: optical image (created from DSS-II $B$,$R$,IR plates). Bottom-right: GALEX Nearby Galaxy Survey \citep[NGS;][]{gildepaz_etal_2007} FUV-NUV false color image.}
\label{fig:grid}
\end{figure*}
The RM gradient is centered at the typical RM value in the surrounding disk, making it unlikely to be caused by fluctuations in $n_e$. The RM gradient is, on the other hand, naturally explained by a significant vertical deviation of the local ordered magnetic field direction -- the field orientation pointing toward the observer on one side, and away on the other. The observed RM pattern originates on the front side of the disk because turbulence in the midplane depolarizes radiation from the backside \citep[e.g.,][]{braun_etal_2010}. Since RM is defined to be a positive quantity for a LOS magnetic field pointing toward the observer, the observed pattern implies that the field is oriented toward the observer on the side marked `A' in Figure~\ref{fig:rmhole}, and away on side `B'. Thus the ordered magnetic field is directed inward (along the spiral pattern toward the center of the galaxy). This agrees with the conclusion drawn from the much larger-scale RM gradient already observed across the entire disk \citep{beck_2007}.
\section{Discussion}
The coincidence of the RM gradient with the \textsc{H\,i}\ hole strongly suggests that this magnetic structure is caused by the energetic process that led to the formation of the \textsc{H\,i}\ hole itself.
The \textsc{H\,i}\ properties of the hole indicate that it is a fairly young \citep[$\tau\sim20\,\mathrm{Myr}$;][]{boomsma_2007} but well-defined structure (albeit with low \textsc{H\,i}\ column density contrast; see Figure~\ref{fig:grid}). A position-velocity (PV) diagram along a slice through the hole feature is presented in Figure~\ref{fig:pv}. The hole is classified by \citet{boomsma_2007} as Type 1 under the scheme described by \citet{brinks_bajaja_1986}, meaning that it is characterized by a gap in the PV diagram but without the appearance of an expanding bubble. This particular hole is most easily recognized in Figure~\ref{fig:pv} by inspection of the contours and noting the density enhancement at the edges of the hole. A two-component fit to the average velocity profile over the width of the hole is also shown. The combination of two Gaussian components produces an excellent match to the data. The velocity difference between the components is
$11.7\,\mathrm{km\,s^{-1}}$; correcting for the inclination angle of $38^\circ$, the velocity along the axis perpendicular to the disk is $14.8\,\mathrm{km\,s^{-1}}$. The velocity dispersion increases from $6.5\,\mathrm{km\,s^{-1}}$ in the lower-velocity component, which we associate with the ISM in the main disk, to $14.1\,\mathrm{km\,s^{-1}}$ in the redshifted component. Assuming that the redshifted gas is located on the front side along with the magnetic feature, the second \textsc{H\,i}\ component represents gas which is falling back onto the disk at an average velocity of $15\,\mathrm{km\,s^{-1}}$. Inspection of the PV diagram shows that the infall is more pronounced on the SE (radially outward) side of the hole. Such wings in the velocity profiles are ubiquitous in NGC~6946 and are certainly not unique to this region, but this particular redshifted feature fits naturally into the picture described below. Note that a blueshifted component, if present, would be confused by \textsc{H\,i}\ emission from the Milky Way as indicated in Figure~\ref{fig:pv}.
\begin{figure*}
\centering
\includegraphics[width=0.95\textwidth]{fig3}
\caption{Kinematics of the \textsc{H\,i}\ hole. Left: PV diagram along the slice shown in the top left panel of Figure~\ref{fig:grid}. The orientation along the slice (SE to NW) is indicated. Contours start at $0.36\,\mathrm{mJy\,beam^{-1}}$ ($1.5\sigma$) and increase by powers of two. Dotted contours are drawn for negative values. The locations of the edge of the \textsc{H\,i}\ hole are indicated with vertical dashed blue lines. Note the high-velocity wing at the location of the hole. The red hatched region (at negative velocities) shows the area of the diagram which is contaminated by MW \textsc{H\,i}\ emission. Right: the average velocity profile within the hole region of the PV diagram, along with a two-component Gaussian fit to the profile. The red and blue components together give the combined fit, which is shown in green. The residuals of the fit are shown at the bottom. Again, the red-hatched region indicates the values that are contaminated by MW \textsc{H\,i}\ (that region was not included in the fit).}
\label{fig:pv}
\end{figure*}
The magnetic field strength in the vicinity of the bubble can be estimated by taking advantage of the fact that the observed RM gradient ($\Delta\mathrm{RM}$) is symmetric about the average RM in the region. Half of the RM gradient can thereby be attributed to the field strength, which is to say that
\begin{equation}
\frac{1}{2}\,\Delta\mathrm{RM}\,=\,0.81\int\,n_e\,\vec{B}\cdot\mathrm{d}\vec{l}.
\end{equation}
Taking the inclination $i$ of the galaxy into account, we have
\begin{equation}
\frac{1}{2}\,\Delta\mathrm{RM}\,=\,0.81\,n_e\,|\vec{B}|\,L\,\sin\alpha\sin i,
\end{equation}
where $L$ is the path length through the volume of interest and $\alpha$ is the angle between the magnetic field lines and the disk plane. Taking the inclination to be $38^\circ$, and estimating $\alpha=45^\circ$, $L=300\,\mathrm{pc}$ (half of the distance across the RM feature), and a typical thermal electron density $n_e=0.05\,\mathrm{cm^{-3}}$ \citep[cf.][]{ferriere_2001}, the resulting regular magnetic field strength is determined to be
\begin{equation}
|\vec{B}|\,=\,7.2\left[\left(\frac{n_e}{0.05\,\mathrm{cm^{-3}}}\right)\left(\frac{L}{300\,\mathrm{pc}}\right)\left(\frac{\sin\alpha}{\sin45^\circ}\right)\right]^{-1}\,\mu\mathrm{G},
\end{equation}
which compares well with the magnetic field strength previously measured in the range $8-10\,\mu\mathrm{G}$ in the magnetic arms \citep{beck_2007}, where the feature studied here is located.
Taken together, these lines of evidence provide new observational support to a picture consistent with the chimney model \citep{norman_ikeuchi_1989} of disk-halo interaction, in which star formation activity in the disk drives convective hot gas flows upward into the halo, carrying magnetic fields along with the hot gas motion, and forming bubbles in the disk that are observable as \textsc{H\,i}\ holes. The hot gas cools in the halo, condensing into \textsc{H\,i}\ clouds which then return to the disk on time scales of a few tens of Myr and with a net outward radial movement \citep{collins_etal_2002,fraternali_binney_2006}. State-of-the-art numerical models \citep[e.g.,][]{korpi_etal_1999,avillez_breitschwerdt_2005} depict the detailed evolution of the ISM during this process. The observational results presented here make the first clear identification of magnetic involvement in disk-halo flows. This has significant consequences for our understanding of the galactic dynamo process, because large-scale magnetic fields are carried upward by the hot gas, but will not return downward with the cool clouds traced by the redshifted \textsc{H\,i}\ \citep{brandenburg_etal_1995}. Moreover, the smaller-scale turbulent component of the magnetic field will also be carried out of the disk, which has been proposed as a mechanism to prevent quenching of the dynamo process \citep{shukurov_etal_2006}. As a related point, we remark that a close connection in NGC~6946 between the position angle of the elliptically shaped \textsc{H\,i}\ holes and the local spiral pitch angle has previously been noticed \citep{boomsma_etal_2008}, and may actually reflect the fact that superbubbles preferentially expand along the direction of the ordered field \citep{tomisaka_1998,stil_etal_2009}.
It is remarkable that of the many \textsc{H\,i}\ holes in NGC~6946 \citep[121 are listed by][]{boomsma_etal_2008}, only one shows a clear RM signature. In estimating how unique this feature may be, there are several factors to take into consideration.
\begin{itemize}
\item The lack of polarized synchrotron radiation on the receding (southwest) side of the galaxy, clearly seen in Figure~\ref{fig:rmhole}, is now understood to be a geometric effect \citep{braun_etal_2010}, so no RM information is available for the subset of holes in that quadrant of the disk. This effect is relevant for about 30 (25\%) of the catalogued holes.
\item Many holes are located where more intense star formation is taking place, which increases the level of turbulence and thus the local degree of depolarization. This explains why there is an overall anticorrelation between the locations of \textsc{H\,i}\ holes and regions with diffuse polarized synchrotron radiation. It is likely that the particular feature studied here is located some distance above (closer to the observer than) the turbulent midplane of NGC~6946, making it visible in diffuse polarization. Only 16 (13\%) of the catalogued \textsc{H\,i}\ holes fall {\it fully} within the regions which do not suffer from depolarization effects or the geometric effect mentioned above (see Figure~\ref{fig:grid}). Estimating that the diffuse polarized emission originates at least one exponential scale height above the midplane on the frontside, only about 25\% of those 16 holes could have been traced by diffuse polarized emission.
\item The age of the hole must be within a certain range: old enough that the field configuration has had time to gain a significant vertical offset, but young enough that vertical shear has not yet destroyed the observational signature. For an outflow speed of $100\,\mathrm{km\,s^{-1}}$, the feature can grow to 300\,pc height after only about 3\,Myr. The vertical shear is not known for NGC~6946 \citep{boomsma_etal_2008}, but it is suspected to be at least as high as the measured value in NGC~891, $\approx15\,\mathrm{km\,s^{-1}\,kpc^{-1}}$ \citep{heald_etal_2006,oosterloo_etal_2007}. For this, the characteristic shear time would be around 60\,Myr. Other galaxies have higher shear values \citep[e.g.,][]{heald_etal_2007}, so the length of the observational window is short indeed but matches well with the age estimated for this particular hole from its \textsc{H\,i}\ properties.
\item Finally, we note that the azimuthal location of the \textsc{H\,i}\ hole within the disk of NGC~6946 is only $12^\circ$ from the minor axis. Since the pitch angle of the spiral pattern in the magnetic field vectors is about $18^\circ$ (cf. the optical spiral pitch angle given by \citet{kennicutt_1981}, $28^\circ$), this means that the location in the disk leads to the ideal geometrical situation for identifying an RM gradient across the feature. At other locations in the disk such a feature would have a different and far less obvious observational signature.
\end{itemize}
Considering all of these effects, we estimate that the feature described here could have been detected in only about 4 of the 121 catalogued \textsc{H\,i}\ holes. It therefore seems reasonable to expect that the vertical magnetic field transport detected in this one \textsc{H\,i}\ hole is actually much more ubiquitous across the disk of NGC~6946 and other similar galaxies. Sensitive observations at higher radio frequencies, where depolarization effects are less severe, can be utilized to test this prediction. Observations in the $2-4$ GHz range, for example, would provide a reasonable compromise between recovery of depolarized diffuse emission on the one hand, and acceptable RM precision on the other hand.
\acknowledgments
I thank Rense Boomsma and Tom Oosterloo for providing access to the \textsc{H\,i}\ data and the list of \textsc{H\,i}\ holes, as well as John McKean for helpful comments on an early version of the manuscript. I also thank the anonymous referee for comments that helped to strengthen the conclusions of the paper. The Second Palomar Observatory Sky Survey (POSS-II) was made by the California Institute of Technology with funds from the National Science Foundation, the National Geographic Society, the Sloan Foundation, the Samuel Oschin Foundation, and the Eastman Kodak Corporation. The Galaxy Evolution Explorer (GALEX) is a NASA Small Explorer. The mission was developed in cooperation with the Centre National d'Etudes Spatiales of France and the Korean Ministry of Science and Technology.
{\it Facilities:} \facility{WSRT}.
|
{
"timestamp": "2012-07-16T02:01:30",
"yymm": "1206",
"arxiv_id": "1206.6569",
"language": "en",
"url": "https://arxiv.org/abs/1206.6569"
}
|
\section{Introduction} \label{sec:intro}
Pulsar nulling, which was first reported by \citet{bac70}, is a
phenomenon in which the pulse emission abruptly turns off for a
certain number of pulse periods, then suddenly returns to
normal. Early studies showed that the ``Nulling Fraction'' (NF),
i.e. the fraction of time that a pulsar is in null state, of most
nulling pulsars is less than 10\% \citep{big92a,viv95}.
\citet{wmj07} studied a sample of 23 nulling pulsars,
including some extreme nulling pulsars with NF up to 95\%.
Investigating the emission behaviors of nulling pulsars is important
to understand the pulsar emission mechanism. Different patterns of
transition between null and burst state have been noted by several
authors. For PSR B1749$-$28 \citep{rit76}, B0809$+$74
\citep{la83,vsrr03}, B1944$+$17 \citep{rit76,dchr86} and B0818$-$41
\citep{bgg10}, the onset of burst is abrupt, and the transition from
burst to null state shows a gradual decline of pulse
emission. However, the pulse intensity increases gradually when
emission starts after a null for PSR J0941$-$39 \citep{bb10}, and the
cessation of emission is sudden for PSR B0031$-$07 \citep{viv95} and
B0818$-$13 \citep{la83}. \citet{bgg10} investigated the post- and
pre-null emission behavior of PSR B0818$-$41, and showed that the
first few pulses after the nulls outshine following pulses, whereas
the last few pulses before the nulls are less intense than other
pulses, and they noted that the phenomenon of null may be associated
with some kind of `reset' of the pulsar radio emission engine. Nulling
in most pulsars occurs randomly. However, \citet{klo+06} reported the
quasi-periodic nulls of B1931$+$24; furthermore, periodicity in
nulling pulsars has been detected in PSR B1133$+$16 \citep{hr07},
J1819$+$1305 \citep{rw08} and J1738$-$2330 \citep{gjk09}.
PSR J1502$-$5653 was discovered during the Parkes Multibeam Pulsar
Survey \citep{hfs+04}. The rotation period of the pulsar $P$ is
0.535 s, and its first derivative $\dot{P}$ is
1.83$\times{10^{-15}}$ s s$^{-1}$. Correspondingly, it has a
characteristic age of 4.64$\times{10^6}$ years and surface magnetic
field strength of $10^{12}$ gauss \citep{hfs+04}. \citet{wmj07}
investigated J1502$-$5653 at 1518 MHz and showed that this pulsar
has a NF of 93\%, which makes it an extreme nulling pulsar, with
active pulses lasting typically a minute at intervals of 10 to 15
min of null pulses.
In this paper we carry out a detailed investigation of the emission
behavior of PSR J1502$-$5653. Data analysis and results are presented
in Section 2. The implications of the results are discussed in Section
3. Finally in Section 4, we summarize this work.
\section{Data analysis and results}\label{sec_res}
The data were obtained on September 12, 2002 using the Parkes 64-m
telescope, at a central frequency of 1374 MHz. The data last for 6
hours, and contain 40308 pulse periods. The filterbank
system has a total bandwidth of 288 MHz with $96\times3$MHz channels
of polarization-summed data (for each beam) which are sampled every
1 ms. Details of the observing system are described by
\citet{mlc+01}. The single-pulse time sequence is obtained by
de-dispersing the data at a dispersion measure ($DM$) of 194.0 \dmu.
Pulse intensities were computed by summing samples within an on-pulse
window of width 20 ms and subtracting the baseline level determined
in an off-pulse window of width 200 ms.
\subsection{Time sequence and blocks of successive pulses}
As shown in Fig. \ref{fig_tsline}, the time sequence shows many
blocks of consecutive strong pulses. In this paper, we considered
intervals of more than ten pulse periods with no detectable emission
as null states, and intervals between null states as burst states. In
this way, a total of 29 blocks of burst state (3451 pulses) are
identified. The duration of these blocks varies from about 32 s (60
pulses) to 2 min (240 pulses), with an average duration of 1 min,
while null state lasts from about 16 s (30 pulses) to 25 min (2800
pulses). The Fourier transformation of the autocorrelation function
of the whole time sequence shows two relatively broad peaks at
periods of 11 min and 18 min, implying the burst appearance of the
pulsar may be quasi-periodic.
Ten typical blocks of burst are displayed in separate plots of
Fig. \ref{fig_block10} in the form of grayscale diagram (left panel)
and intensity diagram (right panel). The burst blocks in these plots
begin from the 11th pulse, and the preceding 10 nulls are retained for
comparison. The first ten pulses in each block are quite strong,
and the first few tens of pulses are uninterrupted
by nulls. However, in the middle or late stages of some
burst blocks, the pulse sequence is interrupted by a few short nulls,
usually less than 10 periods. Just following some burst blocks,
one or two sporadic strong pulses are detected occasionally during
the null state. These sporadic single pulses are similar
to the pulses during burst states in intensity, phase and shape.
As can be seen in Fig. \ref{fig_block10}, the pulse intensity shoots
up to a relatively high magnitude for the first few pulses, and then
the pulse intensity drops gradually. After about ten to twenty pulses,
this relatively steady decrease is replaced by a pattern of
random fluctuation in intensity. As shown in the left panel
of Fig. \ref{fig_block10}, the single pulses drift from later to
earlier longitudes at the beginning of each block of burst,
and then present irregular modulation in pulse phase. These
indicate that the variations of pulse intensity and pulse phase
modulations may be correlated at the early stage of burst; this is
studied further in Section 2.3. All the 29 blocks of burst start
with abrupt rise of the intensity, and at least 23 of them end up
with a gradual decline.
\begin{figure}
\centerline{\psfig{file=1502tsline.ps,width=85mm,angle=270}}
\caption{Six hours of time sequence of PSR J1502$-$5653. The time
sequence is equally divided into three panels, each presents two
hours of data.} \label{fig_tsline}
\end{figure}
\begin{figure}
\centerline{\psfig{file=1502block10.ps,width=90mm,angle=270}}
\caption{Ten blocks of 300 individual pulse periods containing burst
presented in grayscale diagram (left panel) and intensity diagram
(right panel) from PSR J1502$-$5653. Phase shifting to earlier
longitude and intensity declining can be seen clearly at the early
stage of each burst block.} \label{fig_block10}
\end{figure}
\subsection{Average profiles and pulse energy distributions}
Fig. \ref{fig_prof} shows the integrated profiles for the whole data
span including both burst and null pulses and for just the null
pulses. There is no detectable profile by integrating all pulses in
null state, whereas when the 3451 pulses in burst blocks are
added in, the profile is prominent, showing that the
burst pulses are actually very strong. The average profile of the
pulsar is narrow, with a 10 per cent width of 9.4$^\circ$ (14 ms) in
longitude.
\begin{figure}
\centerline{\psfig{file=1502theprof2.ps,width=80mm,angle=270}}
\caption{Integrated profiles of PSR J1502$-$5653 for all 40308
pulses
(top panel) and the 36857 null pulses (bottom panel).}
\label{fig_prof}
\end{figure}
\begin{figure}
\centerline{\psfig{file=1502histogram2.ps,width=76mm,angle=270}}
\caption{Histogram of on-pulse (solid line) and off-pulse (dotted
line) energies normalized by the mean pulse energy for PSR
J1502$-$5653.} \label{fig_histogram}
\end{figure}
Fig. \ref{fig_histogram} presents histograms of pulse energy
distribution in the pulsar's on-pulse and off-pulse windows, which
are constructed using the method described by \citet{rit76}.
On-pulse and off-pulse energies are determined by integrating within
an on-pulse window and an off-pulse window of the same duration,
respectively, after subtracting the same baseline. The histogram formed
from off-pulse energies (dotted line) centers around zero, while that
from on-pulse energies (solid line) has a ``long tail'' component due
to burst pulses and a large Gaussian component due to null pulses. The
NF of the pulsar is estimated to be 93.6\% through the histograms.
As shown in this figure, the energy of the strongest pulse is 42
times that of mean pulse, suggesting the burst pulses are strong and
highly modulated. This is the first pulse in the burst which is
situated at about 332 min in Fig.~\ref{fig_tsline}.
\subsection{Single pulse intensity variation and phase modulation}
To further study the emission behavior of PSR J1502$-$5653 during the
early stage of burst just after null, we construct the mean pulse
sequence by superimposing the first 50 detectable burst pulses of all
blocks (the shortest burst block contains more than 50 burst pulses)
in accordance with the sequence of pulses and the pulse phases,
while ten earlier pulses are also included for comparison.
The distinguishable boundaries from null to burst, the abundance of
burst blocks in the data and no null appears in the first 50
pulses in all burst blocks make this method feasible and effective
in investigating the emission properties of the early stage of burst.
The result is plotted in the top-middle panel of Fig. \ref{fig_phase}.
The intensity fluctuation and phase modulation of the first ten pulses
in the burst state look different from that of the following pulses.
\begin{figure}
\centerline{\psfig{file=1502prof-insert-pha.ps,width=85mm,angle=270}}
\caption{Phase and intensity variations of mean pulse sequence (see
text). The top-middle panel shows gray-scale plot of the sequence.
The top-left and top-right panel present phase and intensity
variation of the mean pulse sequence respectively. The bottom
panel shows the average profile (solid line) and three profiles,
which are formed from the first three pulses (dashed line) in
burst state, the fourth to ninth pulse (dot-dashed line) and
the tenth to 15th pulse (dotted line), respectively.}
\label{fig_phase}
\end{figure}
\begin{figure}
\centerline{\psfig{file=plotpulse.ps,width=65mm,angle=270}}
\caption{Plots of three consecutive pulses selected from the mean
pulse sequence. From top to bottom, they are the 9th, 10th and 11th
pulses in the mean pulse sequence. The top one is the last null
pulse, the middle is the transition pulse at the start of burst and
the bottom one corresponds to the first pulse after the transition
pulse in the mean pulse sequence.} \label{fig_plot}
\end{figure}
As shown in the top-right panel of Fig. \ref{fig_phase}, the intensity
shoots up at the first mean pulse in the burst state, and remains
strong for about three pulses, then declines exponentially over the
following sequence of about twenty pulses, and becomes stable at about
half of the maximum intensity for the next few tens of pulses.
Using the method described by \citet{bgg10}, we calculate that the
average intensity of the first three mean pulses in the burst state is
1.4, 1.8, 2.2 and 2.4 times that of the following No. 4--9, 10--15, 31--40
and 41--50 mean pulses respectively.
The top-left panel of Fig. \ref{fig_phase} shows that the peak phases
of the first few burst pulses appear at later longitudes than that of the
following pulses. In about thirteen pulse periods, the pulse phase
drifts about 0.8 degree to earlier longitudes, and the phase
of the 13th pulse is equal to the peak phase of average profile,
then the apparent drifting stops and is replaced by irregular phase
modulation. We note that the intensity fluctuation and phase
modulation of the pulsar is correlated in the beginning of burst.
In Fig. \ref{fig_phase} there is some evidence for a weak wide pulse
at the start of burst. This pulse is the 10th pulse in the mean
pulse sequence. We call it ``transition pulse'' in this paper. Fig.
\ref{fig_plot} shows three consecutive pulses selected from the mean
pulse sequence, the last null pulse, the transition pulse, and the
first strong pulse. The transition pulse has a full width at half
maximum (FWHM) of 6.7$^\circ$ and signal-to-noise ratio (S/N) of
4.34. For comparison, the S/N of the first strong burst pulse is
44.1 and the width is 4.04$^\circ$. The transition pulse is very
weak, so it is only detectable in the mean pulse sequence.
The bottom panel in Fig. \ref{fig_phase} displays the average profile
(solid line) of the pulsar and three profiles which are obtained from
the first three pulses following the transition pulse,
the fourth to the ninth pulses and the tenth to 15th pulses, respectively.
The peaks of these three profiles appear at longitudes 0.72$^\circ$,
0.35$^\circ$ and 0.13$^\circ$ respectively, where the peak phase of
the average profile is set as zero. The widths of these three
profiles are 3.86$^\circ$, 4.06$^\circ$ and 4.33$^\circ$, respectively,
while that of the average profile is 4.53$^\circ$.
Fig. \ref{fig_width} presents the FWHM of \textbf{25} mean pulses
in burst state, excluding the transition pulse, showing that
the FWHM increases with pulse number at the beginning
of burst. Around the 13th pulse of the burst the width reaches that of
the average profile.
Apart from the wide transition pulse, it is clear that in a short
duration after the null, the radiation window gradually
broadens while the pulses drift from later to earlier longitudes.
The middle and later stage in burst block are often
randomly disrupted by short nulls, and the behaviour is not so
systematic as in the early stages.
\begin{figure}
\centerline{\psfig{file=width.ps,width=65mm,angle=0}} \caption{FWHM
of the 25 mean pulses in burst state of the mean pulse sequence
versus the corresponding pulse number. The dashed line denotes the
pulse width of the average profile. The error of the width is
derived from the uncertainty given by a standard Gaussian fitting
procedure.} \label{fig_width}
\end{figure}
\section{DISCUSSION} \label{sec:dis}
The emission of J1502$-$5653 is characterized by an abrupt transition
from null to burst with a timescale of less than two pulse periods, a
gradual decrease of pulse intensity in the early stage of a burst
which is accompanied by a shift to earlier longitude in pulse phase,
and a broadening in pulse width. Gradual cessation of the emission in
some bursts before nulls is also noticed. Similar behavior can be seen
in PSR B0818$-$14. \citet{bgg10} reported that, for this pulsar, the
transition from nulls to bursts is abrupt and pulse intensity from
bursts to nulls appears to reduce gradually, and the profile shape of
the first few pulses differs from that of average profile. They also
mentioned that the behavior of subpulse drifting at the beginning of
burst pulses after null is different from the following pulses.
Similarly, in PSR J1502$-$5653, the pattern of phase modulation
of the first few pulses is distinct from that of the later pulses
during burst.
Recently, \citet{bb10} discovered bizarre emission behavior of PSR
J0941$-$39, i.e. sometimes it only emits sporadic pulses and at other
times it behaves just like a nulling pulsar. From Fig. 5 of
\citet{bb10}, we notice that the post-null pulse phases seem to shift
towards earlier longitudes, and this looks similar to the drifting
behavior of PSR J1502$-$5653. However, unlike PSR J1502$-$5653, the
pulse intensity of PSR J0941$-$39 appears to increase gradually at the
beginning of post-null emission, when it behaves like a nulling
pulsar.
The post-null pulse drifting of PSR J1502$-$5653 may be explained by
the vacuum gap model \citep{rs75}. According to the classical vacuum
gap model, the sub-beams of emission circulate around the magnetic
axis, as a result of ${\bf E}\times{\bf B}$ drift of spark plasma
filaments. At the beginning of each burst, the electric field in the
accelerating gap is relatively high and in consequence the observed
${\bf E}\times{\bf B}$ drift-rate is high. Then the sparking process
starts, which produces not only strong radio emission but also
$e^{+}$ $e^{-}$ pairs. A few pulse periods later, the $e^{+}$ $e^{-}$
pairs accumulate in the accelerating gap and decrease the electric
field to a relatively stable value where sparking and radio
emission go on but the drift-rate reduces to nearly zero. As the
accumulation proceeds the gap electric field strength keeps on
weakening until sparking process breaks down and the radio emission
ceases. \citet{gmg03,ghm+08} refined the vacuum model by introducing
a thermal ion outflow from the hot polar cap surface.
However, the intensity variations and their correlation with
phase modulation of burst pulses at the beginning of burst blocks
need to be further investigated.
\citet{klo+06} noted that PSR B1931$+$24 turns `on' for 5$-$10 days
and `off' for 25$-$35 days, the switch occurs in a quasi-periodic
fashion, and no obvious emission can be found in the integrated
profile of null state. The difference of the slow-down rates in the
`on' and `off' states of this pulsar indicates a massive change in
magnetospheric currents. The quasi-periodic transition between `on'
and `off' state and non-detection of integrated energy by folding many
null pulses of PSR J1502$-$5653, suggest that the emission behavior of
this pulsar is somehow similar to that of the intermittent pulsar
B1931$+$24, but with very different `on' and `off' timescales. The
scenario of two slow-down rates of PSR B1931$+$24 may be applicable to
PSR J1502$-$5653, however, measuring two slow-down rates is not
possible for this pulsar, because of the very short durations of `on' and
`off' states. The timescale of the magnetospheric-current changing is
believed to be very short, and it is not yet clear whether and how the
pulses emitted at the very beginning of strong burst state are
influenced by the switching process of the magnetospheric currents.
\section{CONCLUSION} \label{sec:con}
The bursts of pulses of PSR J1502$-$5653 have a typical
duration of 1 min or about 100 pulse periods, and they are separated
by nulls lasting from 30 to 2800 pulse periods. The power spectra of
pulse sequence shows two broad peaks at periods of 11 min and 18 min,
revealing that the appearance of the emission may be quasi-periodical
in this pulsar. The nulling fraction estimated from the data is
93.6\%. The integrated profile of all null pulses shows no emission.
Interestingly, by integrating over 29 pulse sequences, a weak
and wide pulse is found just before the first detected single pulse
in all burst blocks.
At the beginning of burst after null the intensity usually rises to
the maximum immediately, and keeps a high intensity for a few pulses,
then the intensity of next twenty or thirty pulses declines
exponentially, and gradually becomes stable at the half of the maximum
intensity. In most cases, the cessation of radiation is gradual. The
peak phase of the first pulse in burst usually appears to be at later
longitudes than that of average profile, then the phase drifts quickly
to earlier longitudes for next several pulses. The drifting then
tends to slow down in next twenty to thirty pulse periods, and then is
replaced by irregular phase modulation. As the peak phase drifts to
earlier longitudes, the pulse intensity declines, meanwhile, the
radiation window broadens gradually till reaching the width of average
profile. A good correlation can be seen between intensity variation
and phase modulation in the early stage of post-null emission.
The phase modulation may be explained by electric field shielding
caused by $e^{+}$ $e^{-}$ pairs produced by sparking process. The
emission behaviors of individual pulses during the transition between
null and burst states may provide a very important clue to understanding the
underlying switching mechanism of nulling pulsars.
\subsection*{ACKNOWLEDGMENTS}
This work was funded by the National Natural Science Foundation of
China (NSFC) under No.10973026. We thank members of the Pulsar Group
at XAO for helpful discussions. The Parkes radio telescope is part
of the Australia Telescope which is funded by the Commonwealth
Government for operation as a National Facility managed by the
Commonwealth Scientific and Industrial Research Organization.
\bibliographystyle{mn2e}
|
{
"timestamp": "2012-06-28T02:01:23",
"yymm": "1206",
"arxiv_id": "1206.6156",
"language": "en",
"url": "https://arxiv.org/abs/1206.6156"
}
|
\section{Introduction} \label{section1}
Let $\mathcal{M}_0$ be a topological submanifold of $\mathbb{R}^N$ with boundary. In this paper we are interested in the problem of finding a Riemannian manifold $(\mathcal{M},g)$ which has \emph{minimal dilation} and satisfies $\partial \mathcal{M} =\partial \mathcal{M}_0$. In this setting, dilation is a functional on $L^\infty(\mathcal{M},\otimes^{(2)} T^*\mathcal{M})$, defined as the $L^\infty$ norm of the trace of the Distortion Tensor
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{1.1}
\bold{G}\ :=\ \frac{g}{\det(g)^{1/n}} .
\eeq
This problem is an extension of the classical \emph{Teichm\"uller Problem} (see \cite{T, AIM, AIMO}). The scaling in \eqref{1.1} is such that $\bold{G}$ is invariant under conformal transformations and, as we explain later, the geometric meaning of $\textrm{tr}(\bold{G})$ being ``minimal'' is that ``geometry is distorted as little as possible''. As a first step, we consider a simplified problem for the case of immersions $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ with prescribed boundary values on $\partial \Omega$. Then, the dilation functional for $\mathcal{M}=u(\Omega)$ becomes
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{1.2}
K_\infty(u,\Omega)\ :=\ \big\|K(Du)\big\|_{L^\infty(\Omega)},
\eeq
where $K$ will be called the \emph{dilation function} and is given by
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{1.3}
K(P)\ :=\ \left\{\begin{array}{r}
\dfrac{|P|^2}{\det(P^\top \! P )^{1/n}}\ , \ \ \ \ \ \ \ \ \ \ \ \ \ \text{ on }S^+,\\
\ \ \ \ \ \ \ \ +\infty \ , \ \ \text{ on }\mathbb{R}^{N \times n} \setminus S^+.
\end{array}
\right.
\eeq
In \eqref{1.3}, $|P|=\textrm{tr}(P^\top\! P)^{1/2}$ is the Euclidean norm on $\mathbb{R}^{N \times n}$ and
\begin{equation}}\newcommand{\eeq}{\end{equation}
S^+ \ :=\ \Big\{P \in \mathbb{R}^{N \times n} \ :\ \det\big(P^\top \! P \big)>0\Big\}.
\eeq
Important objects of Geometric Topology related to \eqref{1.2} arise for $n=N$. Homeomorphisms $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^n$ in $W^{1,n}_{loc}(\Omega)^N$ which satisfy $K_\infty(u,\Omega)<\infty$ are called \emph{Quasiconformal Maps} and constitute a class of maps well studied in the literature; see for example \cite{Ah2, B, G, S, V}. $L^p$ averages of Quasiconformal maps, that is weakly differentiable homeomorphisms for which $\| K(Du) \|_{L^p(\Omega)}<\infty$ have also been systematically considered. \emph{Conformal maps}, namely those homeomorphisms $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^n$ in $C^1(\Omega)^N$ which satisfy $Du^\top Du = \frac{1}{n}|Du|^2I$ form a special important class of Quasiconformal maps since for those $K(Du)$ is constant and equals $n$. Conformal maps preserve \emph{angles}, but not necessarily \emph{lengths} and hence distort the geometry of $\Omega$ in a controlled fashion. However, by Liouville's rigidity theorem, when $n\geq 3$ the only conformal maps that exist are compositions of rotations, dilations, and the inversion $x \mapsto x/|x|^2$. Hence, quasiconformal maps for which $K(Du)$ is merely bounded relax conformality but still deform $\Omega$ to $u(\Omega)$ in a fairly controlled fashion.
The problem with Quasiconformal maps is that too little information on their structure is provided by a mere norm bound, and the same holds for the \emph{finite distortion problem} when one restricts attention to minimisers of the dilation functional. The subtle point is that \eqref{1.2} is \emph{nonlocal}, in the sense that with respect to the $\Omega$ argument \eqref{1.2} is not a measure. Simple examples certify that minimisers over a domain with fixed boundary values are not local minimisers over subdomains and the direct method of Calculus of Variations when applied to \eqref{1.2} generally does not produce PDE solutions.
In the very recent work of Capogna and Raich \cite{CR}, this problem was remedied by ``optimising'' Quasiconformal maps. The idea is to consider an appropriate nonstandard $L^\infty$ variational problem for \eqref{1.2} and derive a PDE governing Optimal Quasiconformal Maps that can be used as a platform for their qualitative study. Motivated by the classical results of Aronsson \cite{A1, A2} on \emph{Calculus of Variations in $L^\infty$}, they developed an $L^\infty$ variational approach for extremal (as they are called therein) quasiconformal maps. The essence of this approach is the following: let $Q_p u =0$ be the Euler-Lagrange system associated to the functional $\|K(Du)\|_{L^p(\Omega)} $. Then, at least formally $Q_p$ tends to a certain operator $Q_\infty$ and $\|K(Du)\|_{L^p(\Omega)}$ tends to $\|K(Du)\|_{L^\infty(\Omega)}$, both as $p\rightarrow \infty$. The operator $Q_\infty$ defines a quasilinear 2nd order system in non-divergence form. However, it is not a priori clear that the following rectangle ``commutes"
\begin{align} \label{1.8}
&\|K(Du)\|_{L^p(\Omega)} \ \ \ \ \longrightarrow\ \ \ \ Q_p u =0 \nonumber\\
& \ \ \ \downarrow\ p \rightarrow \infty\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \downarrow\ p \rightarrow \infty \\
&\|K(Du)\|_{L^\infty(\Omega)} \ \ \ \ \dashrightarrow \ \ \ \ Q_\infty u=0 \nonumber
\end{align}
so that $Q_\infty$ has a variational structure with respect to $K_\infty$, in the sense that appropriately defined minimisers of $K_\infty$ $u: \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^n$ solve $Q_\infty u =0$. In such an event, $Q_\infty u=0$ will play the role of ``Euler-Lagrange PDE" for the dilation functional. This turns out to be the case, though. Among other far-reaching contributions which include a deep study of dilations of extensions up to the boundary and quasiconformal gradient flows, Capogna and Raich introduced in \cite{CR} a localized minimality notion for \eqref{1.2} and proved that those local minimisers among ``competitors" indeed solve the formally derived PDE.
Simultaneously and independently, the author, also inspired by Aronsson's work and the successful modern evolution of the field of Calculus of Variations in $L^\infty$ (see for example \cite{C}), initiated the development of vector-valued Calculus of Variations in $L^\infty$ for general supremal functionals in \cite{K1}-\cite{K6} with particular emphasis to the model functional $\|Du\|_{L^\infty(\Omega)}=\textrm{ess} \, \sup_\Omega |Du|$. For a Hamiltonian $H\in C^2(\mathbb{R}^{N \times n})$ and the respective supremal functional
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{1.6a}
E_\infty(u,\Omega)\ :=\ \|H(Du)\|_{L^\infty(\Omega)},
\eeq
the PDE system which plays the role of ``Euler-Langrange PDE" for \eqref{1.6a} is
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{1.4a}
A_\infty u \, :=\, \Big(H_P \otimes H_P + H[H_P]^\bot H_{PP}\Big)(Du):D^2 u \, = \, 0 .
\eeq
Here $[H_P(Du(x))]^\bot$ is the projection on the nullspace of $H_P(Du(x))^\top : \mathbb{R}^N \longrightarrow \mathbb{R}^n$,
and $H_P,H_{PP}$ denote derivatives (for details see Preliminaries \ref{section2}). The special case of $H(P)=|P|^2$ leads to the important \emph{$\infty$-Laplacian}
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{1.5a}
\Delta_\infty u \, :=\, \Big(Du \otimes Du + |Du|^2[Du]^\bot \! \otimes I\Big):D^2 u \, = \, 0.
\eeq
System \eqref{1.4a} is a quasilinear 2nd order system in non-divergence form which arises in the limit of the Euler-Lagrange system of the $L^p$ functional $\|H(Du)\|_{L^p(\Omega)}$ as $p\rightarrow \infty$. In the scalar case of $n=1$ the normal coefficient of \eqref{1.5a} $ |Du|^2[Du]^\bot$ vanishes, and the same holds for submersions in general. The scalar $\infty$-Laplacian then becomes $Du\otimes Du :D^2u=0$.
Unlike the scalar case of $n=1$, in the full vector case of \eqref{1.4a} intriguing phenomena appear. Except for the emergence of ``singular solutions'' to \eqref{1.4a}, a further difficulty not present in the scalar case is that \emph{\eqref{1.4a} has discontinuous coefficients} even for $C^\infty$ solutions. There exist $C^\infty$ solutions for which the rank of $H_P(Du)$ is not constant: such an example on $\mathbb{R}^2$ for \eqref{1.5a} is given by $u(x,y) = e^{ix}-e^{iy}$ which is $\infty$-Harmonic near the origin and has $\textrm{rk}(Du)=1$ on the diagonal, but it has $\textrm{rk}(Du)=2$ otherwise and hence the projection $[Du]^\bot$ is discontinuous (\cite{K1}). More sophisticated examples with interfaces which have junctions and corners appear in \cite{K4}. In general, \emph{$\infty$-Harmonic maps present a phase separation} and on each phase the dimension of the tangent space is constant and these phases are separated by \emph{interfaces} whereon the rank of $Du$ ``jumps'' and $[Du]^\bot$ is discontinuous (\cite{K1}, \cite{K6}). Extensions of the results of \cite{K1}, \cite{K2} to the subelliptic setting appear in \cite{K3}. Moreover, it has very recently been established that the celebrated scalar $L^\infty$ uniqueness theory has no counterpart when $N\geq 2$ (\cite{K5}).
In this paper we work towards the problem mentioned in the beginning by extending the theory of \cite{CR} to the case of immersions $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ and at the same time we elaborate it and make it more efficient in certain respects. First of all, we allow for positive codimension $N-n$ and take into account the exterior geometry of immersions. Moreover, our maps are local diffeomorphisms onto their images, but in our analysis we do \emph{not} impose the global topological constraint that our maps are homeomorphisms onto their image and allow for self-intersections. However, \emph{all} our results and notions are still valid and with the exact same proofs in this restricted class. For distinction, we introduce the following terminology: \emph{an immersion $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ in $C^1(\Omega)^N$ is called $p$-Quasiconformal when $\| K(Du) \|_{L^p(\Omega)}<\infty$, $1\leq p \leq \infty$}. We begin by repeating part of the program of \cite{K1}, \cite{K2} under the lens of \cite{CR} to the extended case. After some introductory material in Section \ref{section2}, in Section \ref{section3} we calculate the PDE system which Optimal $p$-Quasiconformal immersions satisfy (equations \eqref{3.8}, \eqref{3.9}), that is the Euler-Lagrange system of $K_p(u,\Omega):= \| K(Du) \|_{L^p(\Omega)}$. Then, in Section \ref{section4} we formally derive in the limit as $p\rightarrow \infty$ the PDE system which Optimal $\infty$-Quasiconformal immersions $u: \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ satisfy, that is the system associated to \eqref{1.2}:
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{1.4}
Q_\infty u \, :=\, \Big(K_P \otimes K_P + K[K_P]^\bot K_{PP}\Big)(Du):D^2 u \, = \, 0
\eeq
where the derivatives of the dilation are given by
\begin{align}
K_P(Du)\ &=\ 2Du \frac{g^{-1}S(g)}{\det(g)^{1/n}} \label{2.10a},\\
K_{PP}(Du)\ &= \ 2\left(I \otimes \frac{g^{-1}S(g)}{\det(g)^{1/n}} \ + \, Du \otimes Du : \frac{g^{-1}E}{\det(g)^{1/n}}\right)\ + \ O(Du). \label{2.10b}
\end{align}
Here $g=Du^\top\! Du$, $S$ is the Ahlfors operator given by \eqref{2.6}, $E$ is a constant tensor given by \eqref{3.3A} and $O(Du)$ is a tensor annihilated by $[K_P(Du)]^\bot$ and does not appear in the PDE system \eqref{1.4} (for details see Lemmas \ref{l1}, \ref{l1,1}). The derivation has overlaps with the respective in \cite{K1}, but is not a direct consequence since we utilise the specific structure of the Hamiltonian \eqref{1.3}. By restricting ourselves to $n=N$ and employing Lemma \ref{l6} to relate the seemingly different system \eqref{1.4} to that of \cite{CR}, we see that the derivation as $p\rightarrow \infty$ in \cite{CR} is incomplete and their PDE is only a part of \eqref{1.4}. System \eqref{1.4} consists of two systems whose defining vector-valued nonlinearities are normal to each other:
\begin{align}
K_P(Du) \otimes K_P(Du) :D^2u\, =&\, 0, \label{1.11}\\
[K_P(Du)]^\bot K_{PP}(Du): D^2 u \, =&\, 0. \label{1.12}
\end{align}
System \eqref{1.11} is the ``tangential" part in (the range of the projection) $[K_P(Du)]^\top$ and system \eqref{1.12} is the ``normal" part in $[K_P(Du)]^\bot$ (see Figure 1). The reason for this terminology is that $[Du]^\top$ is (the projection on) the tangent bundle of the immersion, $[Du]^\bot$ is its normal bundle and by \eqref{2.10a} we have that $[K_P(Du)]^\top \subseteq [Du]^\top$.
\[
\underset{\text{Figure 1.}}{\includegraphics[scale=0.24]{figure1,1}} \label{fig1}
\]
The derivation in \cite{CR} has lost information along directions in $[K_P(Du)]^\bot$ and reveals only system \eqref{1.11}. System \eqref{1.12} appears also in zero-codimension when $n=N$ since generally $K_P(Du)$ does not have rank equal to $n$, although by assumption the rank of $Du$ equals $n$. More importantly, \emph{when the rank of $K_P(Du)$ becomes nonconstant, the coefficients of \eqref{1.4} become discontinuous}. This leads to the \emph{appearance of interfaces} whereon the projection $[K_P(Du)]^\bot$ is discontinuous. These interfaces are boundaries of the different \emph{phases} to which Optimal $\infty$-Quasiconformal maps naturally separate.
In Section \ref{section5} we move to the variational structure of Optimal $\infty$-Quasiconformal maps. Inspired from \cite{K2}, we introduce the variational notion of \emph{$\infty$-Minimal Dilation}, which is Rank-One Locally Minimal Dilation with ``Minimally Distorted Area'' of $u(\Omega)$ (Definition \ref{def1}). Rank-one locally minimal dilation requires that an immersion is a local minimiser for the dilation functional when the ``set of competitors" is the one obtained by taking essentially scalar local variations with fixed zero boundary values (Figure 2). Minimally distorted area means that the immersion is a local minimiser where the ``set of competitors" is the one obtained by taking variations along sections of the normal vector bundle $[K_P(Du)]^\bot$ over $u(\Omega)$ with free boundary values (Figure 3). The appearance of interfaces where the dimension of $[K_P(Du)]^\bot$ jumps causes substantial difficulties, even in the very definition of the minimality notion. Our first main result is Theorem \ref{th1}, wherein we prove that $\infty$-Quasiconformal maps with $\infty$-Minimal Dilation are Optimal, \emph{at least off the interfaces of discontinuities in the coefficients}. This result follows closely Theorem 2.1 in \cite{K1} and Theorem 2.2 in \cite{K2}, but nonconvexity of \eqref{1.3}, appearance of discontinuities in \eqref{1.4} and the necessity of restriction to specific variations create complications not present in the results just quoted. We note that the rank-one minimality notion gives rise to the tangential system and the condition on the minimality of the area gives rise to the normal system.
In Section \ref{section6} we study some geometric aspects of \eqref{1.4} and of the interfaces of its solutions. In Subsection \ref{subsection6.1} we show that system \eqref{1.4} has a ``geometric" rather coordinate-free reformulation, at least off interfaces of discontinuities. More precisely, \eqref{1.11} and \eqref{1.12} are respectively equivalent to
\begin{align}
S(\bold{G})D\big(\textrm{tr}(\bold{G})\big)\ &= \ 0 , \label{1.13}\\
\mathbb{B}^\bot : \big(\textrm{tr}(\bold{G})\big)_{P} \ &= \ 0,\label{1.14}
\end{align}
where $\bold{G}$ is given by \eqref{1.1} for $g=Du^\top \! Du$ and $\mathbb{B}^\bot$ is a ``generalized 2nd fundamental form'' with respect to normal sections valued in $[K_P(Du)]^\bot$. If $K_P(Du)$ has full rank $n$, then $[K_P(Du)]^\bot$ coincides with the normal bundle $[Du]^\bot$ of the immersion and $\mathbb{B}^\bot$ reduces to the standard object. System \eqref{1.13} is quite ``metrically invariant'' but system \eqref{1.14} depends on the exterior geometry and measures the ``shape of $u(\Omega)$''. In Subsection \ref{subsection6.2}, by assuming some a priori local $C^1$ regularity on the interfaces but with possible self-intersections, we prove an identity which shows that the covariant gradient of $[K_P(Du)]^\bot$ along the interface is differentiable when projected along $K_P(Du)$.
In Section \ref{section7} we turn our attention to the converse statement of that in Theorem \ref{th1}, that is the sufficiency of \eqref{1.4} for the variational notion of $\infty$-Minimal Dilation. Nonconvexity of \eqref{1.3} and the resemblance to similar phenomena in \emph{Minimal Surfaces} leaves little hope for system \eqref{1.12} to be sufficient for minimally distorted area. However, in Proposition \ref{c8} we establish that when $n=2\leq N$ there is a triple equivalence among the following: solutions of \eqref{1.11}, the condition that the dilation \eqref{1.3} is constant, and the condition that the immersion has rank-one locally minimal dilation. This result relates directly to the two-dimensional results in \cite{Ah1, B, H}. In particular, when $n=2$ interfaces disappear and the coefficients of \eqref{1.4} become continuous.
Moreover, as a consequence of Example \ref{ex1} which certifies that rank-one locally minimal dilation is \emph{strictly weaker} than the variational notion utilized in \cite{CR} with respect to general vector-valued variations (among competitors), \emph{we disprove the conjecture of Capogna-Raich on the sufficiency of \eqref{1.3} explicitly stated on p.~855}. Finally, at the end of Section \ref{section7} we loosely discuss the much more complicated case when $n\geq 3$. In this case results are less sharp. Although it is hardly conclusive, it seems that dilation may not be constant but we do believe that \eqref{1.11} is still sufficient for rank-one locally minimal dilation.
Throughout this paper, as in \cite{CR} and also in \cite{K1}-\cite{K6}, we restrict our analysis to the unnatural class of $C^2$ solutions. This is only the first step in our study and we cannot go much further without an appropriate ``weak'' theory of nondifferentiable solutions for \eqref{1.4}. In the forthcoming paper \cite{K7} we introduce such an approach which applies to fully nonlinear PDE systems and in this setting therein we consider the problem of existence for the $\infty$-Laplacian \eqref{1.5a}. This opens up the way towards the rigorous and efficient study of nonsmooth Optimal Quasiconformal maps.
\section{Preliminaries.} \label{section2}
Throughout this paper we reserve $n,N \in \mathbb{N}$ for the dimensions of Euclidean spaces and $\mathbb{S}^{N-1}$ denotes the unit sphere of $\mathbb{R}^N$. Greek indices $\alpha, \beta, \gamma,... $ run from $1$ to $N$ and Latin $i,j,k,...$ from $1$ to $n$. The summation convention will always be employed in repeated indices in a product. Vectors are always viewed as columns and we differentiate along rows. Hence, for $a,b\in \mathbb{R}^n$, $a^\top b$ is their inner product and $ab^\top$ equals $a \otimes b$. If $u=u_\alpha e_\alpha : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ is in $C^2(\Omega)^N$, the gradient matrix $Du$ is viewed as $D_i u_\alpha e_\alpha \otimes e_i : \Omega \longrightarrow \mathbb{R}^{N \times n}$ and the Hessian tensor $D^2u$ as $D^2_{ij} u_\alpha e_\alpha \otimes e_i \otimes e_j: \Omega \longrightarrow \mathbb{R}^{N \times n^2}$. The Euclidean (Frobenius) norm on $\mathbb{R}^{N\times n}$ is $|P|=(P_{\alpha i}P_{\alpha i})^{1/2} = (\textrm{tr} (P^\top P))^{1/2}$. We also introduce the following \emph{contraction operation} for tensors which extends the Euclidean inner product $P:Q=\textrm{tr}(P^\top Q)=P_{\alpha i}Q_{\alpha i}$ of $\mathbb{R}^{N\times n}=\mathbb{R}^N \otimes \mathbb{R}^n$. Let ``$\otimes^{(r)}$'' denote the $r$-fold tensor product. If $S\in \otimes^{(q)}\mathbb{R}^N \otimes^{(s)} \mathbb{R}^n$, $T \in \otimes^{(p)}\mathbb{R}^N \otimes^{(s)} \mathbb{R}^n$ and $q\geq p$, we define a tensor $S:T$ in $\otimes^{(q-p)} \mathbb{R}^N$ by
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{1.24}
S:T \ :=\ \big(S_{\alpha_q ...\alpha_p... \alpha_1 \, i_s ... i_1} T_{\alpha_{p} ... \alpha_1 \, i_s ... i_1} \big) \, e_{\alpha_q} \otimes ... \otimes e_{\alpha_{p+1}}.
\eeq
For example, for $s=q=2$ and $p=1$, the tensor $S:T$ of \eqref{1.24} is a vector with components $S_{\alpha \beta i j}T_{\beta ij}$ with free index $\alpha$ and the indices $\beta,i,j$ are contracted. In particular, in view of \eqref{1.24}, the 2nd order linear system
\begin{equation}}\newcommand{\eeq}{\end{equation}
A_{\alpha i \beta j}D^2_{ij}u_\beta \, +\, B_{\alpha \gamma k} D_ku_\gamma + C_{\alpha \delta} u_\delta\, =\, f_\alpha ,
\eeq
can be compactly written as $A$:$D^2u + B$:$Du+Cu=f$, where the meaning of ``$:$'' in the respective dimensions is made clear by the context. Let now $P : \mathbb{R}^n \longrightarrow \mathbb{R}^N$ be a linear map. The identity map of $\mathbb{R}^N$ splits as $I=[P]^\top \oplus [P]^\bot$, where $[P]^\top$ and $[P]^\bot$ denote orthogonal projection on range $R(P)$ and nullspace $N(P^\top)$ respectively. Moreover, for the dilation function \eqref{1.3}, we have $K(P)\geq n$ and $K(P)=n$ if and only if $P^\top\!P = \lambda I$ with $\lambda=\frac{1}{n}|P|^2$. This property of $K$ is a simple consequence of the arithmetic--geometric mean inequality applied to the $n$ eigenvalues of $P^\top\!P$ by utilising the Spectral Theorem. Let us now recall some elementary properties of determinants. If $A=A_{ij}e_i \otimes e_j \in \mathbb{R}^n \otimes \mathbb{R}^n$, we have
\begin{align}
\textrm{cof}(A)_{ij}\ :=& \ (-1)^{i+j} \det \Big(\underset{k\neq i, l \neq j}{\Sigma} A_{kl}e_k \otimes e_l \Big),\\
\textrm{cof}(A)\ :=& \ \textrm{cof}(A)_{ij} e_i \otimes e_j ,
\end{align}
\begin{align}
A\, \textrm{cof}(A)^\top \ =& \ \textrm{cof}(A)^\top A\ = \ \det(A)I,\\
D_{A_{ij}}\big(\det(A)\big)\, \equiv & \ \, \big(\det(A)\big)_{A_{ij}} = \ \textrm{cof}(A)_{ij}.
\end{align}
Obviously, subscript denotes partial derivative. The \emph{Ahlfors operator} is defined by
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{2.6}
S(A)\ := \ \frac{1}{2}\big(A+A^\top \big)\ - \ \frac{1}{n}\textrm{tr}(A) I
\eeq
and has the property that for any $A$, $S(A)$ is symmetric and traceless, that is $\textrm{tr}(S(A))=0$. Let now $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ be an immersion in $C^1(\Omega)^N$. Then, the rank of $Du$ satisfies $\textrm{rk}(Du)=n\leq N$. $u$ is \emph{Conformal} when there is $f\in C^0(\Omega)$ such that $Du^\top \! Du =f^2 I$ on $\Omega$, that is $D_i u_\alpha D_j u_\alpha=f^2\delta_{ij}$. For immersions, the Riemannian metric on $u(\Omega)$ induced from $\mathbb{R}^N$ is $g:= Du^\top \! Du$ and $g^{-1}$ denotes the pointwise inverse of the positive symmetric tensor $g$. Since $S(g)=g-\frac{1}{n}\textrm{tr}(g)I$, we have the commutativity relation
\begin{equation}}\newcommand{\eeq}{\end{equation}
g^{-1} S(g)\ =\ S(g)g^{-1} \ =\ I\, -\, \dfrac{\textrm{tr}(g)}{n } g^{-1}
\eeq
which will be tacitly used in the sequel. In view of these conventions, the PDE system describing Optimal Quasiconformal immersions in index form reads
\begin{equation}}\newcommand{\eeq}{\end{equation}
\Big( K_{P_{\alpha i}} K_{P_{\beta j}} + \, K[K_P]_{\alpha \gamma}^\bot K_{P_{\gamma i}P_{\beta j}} \Big)(Du) \, D^2_{ij} u_\beta\ = \ 0.
\eeq
The derivatives $K_P,K_{PP}$ of $K$ appearing here and in \eqref{2.10a}, \eqref{2.10b} are given in index form by \eqref{3.1a}, \eqref{3.2A}. Finally, we will use the notation ``$\Gamma$'' for sections of vector bundles. We note that our terminology of ``$p$-Quasiconformal'' slightly deviates from the usage of this term in the literature, but its purpose is to avoid the less elegant term ``$L^p$-Quasiconformal''. Since we are only interested in the extreme case of $p=\infty$, there will be no confusion. We conclude by observing that when $\Omega \Subset \mathbb{R}^n$, all immersions $u : \overline{\Omega} \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ in $C^1(\overline{\Omega})^N$ are $p$-Quasiconformal for all $p\in [1,\infty]$.
\section{Derivation of the Euler-Lagrange PDE System Governing Optimal $p$-Quasiconformal Immersions.} \label{section3}
In this section we calculate the specific form of the Euler-Lagrange system associated to the functional $\| K(Du)\|^p_{L^p(\Omega)}$ which Optimal $p$-Quasiconformal immersions satisfy. We begin by calculating first and second derivatives of \eqref{1.3}.
\begin{lemma} \label{l1} Let $K$ be given by \eqref{1.3}. Then, $K\in C^1(S^+)$ and its derivative is given by
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{3.1}
K_P(P)\ =\ 2P\frac{\big(P^\top \! P \big)^{-1}S\big(P^\top \! P \big)}{\det\big(P^\top \! P \big)^{1/n}}.
\eeq
\end{lemma}
In index form \eqref{3.1} can be written as
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{3.1a}
K_{P_{\alpha i}} (P)\ =\ 2P_{\alpha m} \Bigg( \frac{ \delta_{mi}-\frac{1}{n}|P|^2\big(P^\top \! P \big)_{mi}^{-1} }{\det\big(P^\top \! P \big)^{1/n}} \Bigg).
\eeq
\medskip \noindent \textbf{Proof of Lemma} \ref{l1}. We begin by observing the triviality that for $P\in S^+$, the matrix $P^\top \! P$ is positive symmetric on $\mathbb{R}^n$ and also
\begin{equation}}\newcommand{\eeq}{\end{equation}
\big(P^\top \! P \big)^{-1,\top}\ =\ \big(P^\top \! P \big)^{\top,-1}\ =\ \big(P^\top \! P \big)^{-1}.
\eeq
By differentiation of \eqref{1.3}, we have
\begin{align} \label{3.3}
K_{P_{\alpha i}}(P)\ =& \ \frac{\ 2P_{\alpha i}\det\big(P^\top \! P \big)^{\frac{1}{n}} \, -\, \dfrac{|P|^2}{n} \det\big(P^\top \! P \big)^{\frac{1}{n}-1} \textrm{cof}\big(P^\top \! P \big)_{kl}(P_{\beta k}P_{\beta l})_{P_{\alpha i}} }{\det\big(P^\top \! P \big)^{2/n}}
\nonumber\\
=& \ \frac{\ 2P_{\alpha i} \, -\, \dfrac{|P|^2}{n \det\big(P^\top \! P \big) }\textrm{cof}\big(P^\top \! P \big)_{kl} \big( \delta_{\alpha \beta} \delta_{ik}P_{\beta l} \, +\, \delta_{\alpha \beta} \delta_{il}P_{\beta k} \big)}{\det\big(P^\top \! P \big)^{1/n}}.
\end{align}
Thus,
\begin{align} \label{3.4}
K_{P_{\alpha i}}(P)\ =& \ \ \frac{\ 2P_{\alpha i} \, -\, \dfrac{|P|^2}{n \det\big(P^\top \! P \big) } \Big( \textrm{cof}\big(P^\top \! P \big)_{il} P_{\alpha l} +\, \textrm{cof}\big(P^\top \! P \big)_{ki} P_{\alpha k} \Big)}{\det\big(P^\top \! P \big)^{1/n}}
\nonumber \\
=& \ 2P_{\alpha m} \, \frac{\ \delta_{m i}\, -\, \dfrac{|P|^2}{n \det\big(P^\top \! P \big) } \dfrac{1}{2}\Big( {\textrm{cof}\big(P^\top \! P \big)_{im}+ \, \textrm{cof}\big(P^\top \! P \big)_{mi} } \Big)}{\det\big(P^\top \! P \big)^{1/n}}.
\end{align}
Hence, \eqref{3.4} gives
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{3.5}
K_P(P)\ = \ \frac{2P}{\det\big(P^\top \! P \big)^{1/n}}\left(I\, -\, \dfrac{|P|^2}{n } \left( \dfrac{\textrm{cof}\big(P^\top \! P \big)^\top +\, \textrm{cof}\big(P^\top \! P \big)}{2 \det\big(P^\top \! P \big)} \right)\right)
\eeq
and by using that
\begin{equation}}\newcommand{\eeq}{\end{equation}
\textrm{cof}\big(P^\top \! P \big)^\top =\ \textrm{cof}\big(P^\top \! P \big) \ =\ \big(P^\top \! P \big)^{-1}\det\big(P^\top \! P \big),
\eeq
equation \eqref{3.5} gives
\begin{align} \label{3.6}
K_P(P)\ =& \ \frac{2P}{\det\big(P^\top \! P \big)^{1/n}}\left(I\, -\, \dfrac{|P|^2}{n } \big(P^\top \! P \big)^{-1} \right)
\nonumber\\
=& \ 2P \frac{\big(P^\top \! P \big)^{-1} }{\det\big(P^\top \! P \big)^{1/n}}\left(P^\top \! P\, -\, \dfrac{|P|^2}{n } I\right).
\end{align}
In view of \eqref{3.6}, formula \eqref{3.1} has been established. \qed
\begin{lemma} \label{l1,1} Let $K$ be given by \eqref{1.3}. Then, $K\in C^2(S^+)$ and its 2nd derivative is given by
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{3.1A}
K_{PP}(P)\ =\ 2 I \otimes \frac{\big(P^\top \! P \big)^{-1} S\big(P^\top \! P \big)}{\det\big(P^\top \! P \big)^{1/n}}\ + \ 2 P \otimes P : \frac{\big(P^\top \! P \big)^{-1} E}{\det\big(P^\top \! P \big)^{1/n}}\ + \ O(P)
\eeq
which in index form can be written as
\begin{align} \label{3.2A}
K_{P_{\alpha i}P_{\beta j}} (P)\ =\ & 2\delta_{\alpha \beta} \Bigg( \frac{\big(P^\top \! P \big)_{ik}^{-1} \big( P_{\gamma k } P_{\gamma j}-\frac{1}{n}|P|^2\delta_{kj} \big) }{\det\big(P^\top \! P \big)^{1/n}} \Bigg) \nonumber\\
& +\ 2P_{\alpha m}P_{\beta l } \Bigg( \frac{ \big(P^\top \! P \big)_{ik}^{-1} E_{kjlm} }{\det\big(P^\top \! P \big)^{1/n}}\Bigg) \ + \ O_{\alpha i \beta j}(P).
\end{align}
Here $O_{\alpha i \beta j}(P)$ is a tensor of the form $K_{P_{\alpha m}}(P)A_{m\beta i j}(P)$ and is annihilated by $[K_P(P)]_{\gamma \alpha}^\bot$, that is $[K_P(P)]^\bot O(P)=0$.\ $E$ is a constant 4th order tensor whose components $E_{kjlm}$ are given by
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{3.3A}
E_{kjlm}\ := \ \delta_{ml}\delta_{jk} + \delta_{mj}\delta_{kl} -\frac{2}{n}\delta_{mk}\delta_{jl} .
\eeq
\end{lemma}
The explicit form of the tensor $O_{\alpha i \beta j}(P)$ is a complicated formula which follows from the proof of Lemma \ref{l1,1}, but we do not need this formula because it is ``killed'' by $[K_P(P)]^\bot$ and does not appear in \eqref{1.4}.
\medskip \noindent \textbf{Proof of Lemma} \ref{l1,1}. We begin by calculating the derivative $\big(\big(P^\top \! P \big)_{mi}^{-1}\big)_{P_{\beta j}}$. We have
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{3.4A}
\big(P^\top \! P \big)_{mi}^{-1} \big(P^\top \! P \big)_{ik}\ = \ \delta_{mk}
\eeq
which gives
\begin{align} \label{3.5A}
\big(\big(P^\top \! P \big)_{mi}^{-1} \big)_{P_{\beta j}} \big(P^\top \! P \big)_{ik} \ &= \ - \big(P^\top \! P \big)_{mi}^{-1} (P_{\gamma i} P_{\gamma k})_{P_{\beta j}} \nonumber\\
&= \ - \big(P^\top \! P \big)_{mi}^{-1}[ \delta_{\beta \gamma} \delta_{ij}P_{\gamma k} + P_{\gamma i} \delta_{\beta \gamma} \delta_{kj} ]\\
&= \ - \big(P^\top \! P \big)_{ml}^{-1} [ P_{\beta k}\delta_{lj} + P_{\beta l} \delta_{kj} ]. \nonumber
\end{align}
Hence, we have
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{3.6A}
\big(\big(P^\top \! P \big)_{mi}^{-1} \big)_{P_{\beta j}} \ = \ - \big(P^\top \! P \big)_{ml}^{-1} [ P_{\beta k}\delta_{lj} + P_{\beta l} \delta_{kj} ] \big(P^\top \! P \big)_{ki}^{-1}.
\eeq
Now we differentiate \eqref{3.1a}:
\begin{align} \label{3.7A}
K_{P_{\alpha i}P_{\beta j}} (P)\ =\ \, & 2\delta_{\alpha \beta}\delta_{mj} \Bigg( \frac{ \delta_{mi}-\frac{1}{n}|P|^2\big(P^\top\!P\big)^{-1}_{mi}}{\det\big(P^\top \! P \big)^{1/n}} \Bigg) \ - \ 2P_{\alpha m} \Bigg( \frac{\big(|P|^2\big(P^\top\!P\big)^{-1}_{mi}\big)_{P_{\beta j}}}{n\det\big(P^\top \! P \big)^{1/n}} \Bigg)
\nonumber\\
& - \ \Bigg[ 2P_{\alpha m} \Bigg( \frac{ \delta_{mi} -\frac{1}{n}|P|^2\big(P^\top\!P\big)^{-1}_{mi}}{\det\big(P^\top \! P \big)^{1/n}} \Bigg) \Bigg]
\frac{ \big(\det\big(P^\top \! P \big)^{1/n}\big)_{P_{\beta j}} }{\det\big(P^\top \! P \big)^{1/n}}.
\end{align}
In view of \eqref{3.1a}, the last summand in \eqref{3.7A} is annihilated by the projection $[K_P(P)]_{\gamma \alpha}^\bot$. We rewrite \eqref{3.7A} as
\begin{align} \label{3.8A}
K_{P_{\alpha i}P_{\beta j}} (P)\ =\ \, &2\delta_{\alpha \beta} \Bigg( \frac{ \delta_{ij}-\frac{1}{n}|P|^2\big(P^\top\!P\big)^{-1}_{ij}}{\det\big(P^\top \! P \big)^{1/n}} \Bigg) \nonumber\\
& - \ 2P_{\alpha m} \Bigg( \frac{ \big(|P|^2\big(P^\top\!P\big)^{-1}_{mi}\big)_{P_{\beta j}}}{n\det\big(P^\top \! P \big)^{1/n}} \Bigg) \ +\ O_{\alpha i \beta j}(P).
\end{align}
By using \eqref{3.6A} in \eqref{3.8A}, we have
\begin{align} \label{3.9A}
K_{P_{\alpha i}P_{\beta j}} (P)\ =& \ \, 2\delta_{\alpha \beta} \Bigg( \frac{ \delta_{ij}-\frac{1}{n}|P|^2\big(P^\top\!P\big)^{-1}_{ij}}{\det\big(P^\top \! P \big)^{1/n}} \Bigg) \ + S_{\alpha i \beta j}(P)\ +\ O_{\alpha i \beta j}(P),
\end{align}
where we have set
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{3.10A}
S_{\alpha i \beta j}(P)\ := \ \frac{2}{n} P_{\alpha m} \frac{ 2P_{\beta j} \big(P^\top\!P\big)^{-1}_{mi} -|P|^2 \big(P^\top \! P \big)_{ml}^{-1} [ P_{\beta k}\delta_{lj} + P_{\beta l} \delta_{kj} ] \big(P^\top \! P \big)_{ki}^{-1} }{ \det\big(P^\top \! P \big)^{1/n} } .
\eeq
Equation \eqref{3.10A} gives
\begin{align} \label{3.10B}
S_{\alpha i \beta j} (P)\ = \ & -\ \frac{4}{n} P_{\alpha m}P_{\beta j} \frac{ \big(P^\top\!P\big)^{-1}_{mi} }{\det\big(P^\top \! P \big)^{1/n} } \nonumber\\
&+\ 2 P_{\alpha m} \left( \frac{\frac{1}{n}|P|^2\big(P^\top\!P\big)^{-1}_{mj}}{\det\big(P^\top \! P \big)^{1/n} }\right) \big(P^\top\!P\big)^{-1}_{ki}P_{\beta k}\\
&+\ 2 P_{\alpha m} \left( \frac{\frac{1}{n}|P|^2\big(P^\top\!P\big)^{-1}_{mk}}{\det\big(P^\top \! P \big)^{1/n} }\right) \big(P^\top\!P\big)^{-1}_{ij}P_{\beta k}. \nonumber
\end{align}
We rewrite \eqref{3.10B} as
\begin{align} \label{3.11A}
S_{\alpha i \beta j} (P)\ = \ & -\ \frac{4}{n} P_{\alpha m}P_{\beta j} \frac{ \big(P^\top\!P\big)^{-1}_{mi} }{\det\big(P^\top \! P \big)^{1/n} } \nonumber\\
&+\ 2 P_{\alpha m} \left( \frac{-\delta_{mj} +\frac{1}{n}|P|^2\big(P^\top\!P\big)^{-1}_{mj}}{\det\big(P^\top \! P \big)^{1/n} } \ + \ \frac{\delta_{mj} }{\det\big(P^\top \! P \big)^{1/n} }
\right) \big(P^\top\!P\big)^{-1}_{ki}P_{\beta k}\\
&+\ 2 P_{\alpha m} \left( \frac{-\delta_{mk}+\frac{1}{n}|P|^2\big(P^\top\!P\big)^{-1}_{mk}}{\det\big(P^\top \! P \big)^{1/n} }\ +\ \frac{\delta_{mk}}{\det\big(P^\top \! P \big)^{1/n} }\right) \big(P^\top\!P\big)^{-1}_{ij}P_{\beta k} \nonumber
\end{align}
and observe that in view of \eqref{3.1a}, $[K_P(P)]_{\gamma \alpha}^\bot$ annihilates the first summands in the brackets of \eqref{3.11A} and $S_{\alpha i \beta j} (P)$ simplifies to
\begin{align} \label{3.12A}
S_{\alpha i \beta j} (P)\ =& \ \, 2\frac{ P_{\alpha k}P_{\beta k} \big(P^\top\!P\big)^{-1}_{ij}
+ P_{\alpha j}P_{\beta k} \big(P^\top\!P\big)^{-1}_{ki} - \frac{2}{n} P_{\alpha m}P_{\beta j}\big(P^\top\!P\big)^{-1}_{mi}
}{\det\big(P^\top \! P \big)^{1/n} } \nonumber\\
& +\ O_{\alpha i \beta j} (P),
\end{align}
for some tensor $O_{\alpha i \beta j} (P)$ annihilated by $[K_P(P)]_{\gamma \alpha}^\bot$. We rewrite \eqref{3.12A} as
\begin{align} \label{3.13A}
S_{\alpha i \beta j} (P)\, = \, 2 P_{\alpha m}P_{\beta l} \big(P^\top\!P\big)^{-1}_{ki}\left(\frac{ \delta_{ml}\delta_{jk} + \delta_{mj}\delta_{kl} -\frac{2}{n}\delta_{mk}\delta_{jl} }{\det\big(P^\top \! P \big)^{1/n} } \right) + O _{\alpha i \beta j} (P).
\end{align}
In view of \eqref{3.13A}, \eqref{3.10A}, \eqref{3.9A} and \eqref{3.3A}, equation \eqref{3.2A} follows.
\qed
\medskip
In view of Lemma \ref{l1}, the Euler-Lagrange system describing Optimal $p$-Quasiconformal immersions $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ is
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{3.8}
Q_p u\ := \ \textrm{Div}\Big(K(Du)^{p-1} K_P(Du) \Big)\ = \ 0.
\eeq
In view of \eqref{3.1}, \eqref{3.8} can be written in index form as
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{3.9}
D_i \left( \left(\frac{\textrm{tr}(g)}{\det(g)^{1/n}}\right)^{p-1} \! D_k u_\alpha \,\frac{ g^{-1}_{km} S(g)_{mi}}{\det(g)^{1/n}}\right) \ = \ 0,
\eeq
where $g=Du^\top \! Du$ is the Riemannian metric and $S$ is the Ahlfors operator of \eqref{2.6}.
\section{Derivation of the PDE System Governing Optimal $\infty$-Quasiconformal Immersions.} \label{section4}
The derivation we perform in this section can be deduced by a reworking of our results in \cite{K1, K2} and application of Lemmas \ref{l1} and \ref{l1,1} proved previously, but for the reader's convenience it is best to argue at the outset. Let $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ be an immersion in $C^2(\Omega)^N$. By distributing derivatives in \eqref{3.8}, we have
\begin{align} \label{4.1}
(p -1) K^{p-2} K_{P_{\alpha i}}(Du) K_{P_{\beta j}}(Du) D^2_{ij}u_\beta \ + \
K^{p-1}K_{P_{\alpha i} P_{\beta j}}(Du) D^2_{ij}u_\beta \ = \ 0.
\end{align}
For each $x\in \Omega$, $K_P\big((Du)(x)\big) : \mathbb{R}^n \longrightarrow \mathbb{R}^N$ is a linear map. We define the orthogonal projections
\begin{align}
[K_P(Du)]^\bot\ & :=\ \textrm{Proj}_{N((K_P(Du))^\top)}, \label{4.3}\\
[K_P(Du)]^\top\ & :=\ \textrm{Proj}_{R(K_P(Du))}, \label{4.4}
\end{align}
which are the projections on nullspace of $(K_P(Du))^\top$ and range of $K_P(Du)$ respectively. We rewrite \eqref{4.1} by applying the expansion $I = [K_P(Du)]^\bot + [K_P(Du)]^\top$ of the identity of $\mathbb{R}^N$ and contract the derivative in the left hand side to obtain
\begin{align} \label{4.5}
K_P(Du) D\big(K(Du)\big) \ \,+& \ \frac{K}{p -1} [K_P(Du)]^\top K_{PP}(Du):D^2u \nonumber\\
=& \ -\frac{K}{p -1} [K_P(Du)]^\bot K_{PP}(Du):D^2u.
\end{align}
The left hand side is a vector valued in $[K_P(Du)]^\top$ and the right hand side is a vector valued in $[K_P(Du)]^\bot$. By orthogonality, left and right hand side vanish and actually \eqref{4.5} decouples to two systems. We rescale the right hand side of \eqref{4.5} by multiplying by $p-1$ and rearrange to obtain
\begin{align}
K_P(Du) \otimes K_P(Du):D^2u \ \,+ & \ \, K[K_P(Du)]^\bot K_{PP}(Du):D^2u \nonumber\\
=& \ -\frac{K(Du)}{p -1} [K_P(Du)]^\top K_{PP}(Du):D^2u.
\end{align}
We rewrite as
\begin{align} \label{4.8}
\Big( K_P \otimes K_P + K[K_P]^\bot K_{PP}\Big)(Du):D^2u\ = \ -\frac{K [K_P]^\top K_{PP}}{ p -1}(Du):D^2u .
\end{align}
As $p \rightarrow \infty$, \eqref{4.8} leads to \eqref{1.4}.
\begin{remark}
We note that we can also remove the dilation function $K$ from the normal coefficient $ [K_P]^\bot K_{PP}$ by renormalisation, because it is strictly positive: $K(Du)\geq n >0$. We do not have this option in the case of the general system \eqref{1.4a}, because $|H(Du)|$ may vanish. However, when $n=2\leq N$ and $H(P)=|P|^2$, in \cite{K6} we show that non-constant $\infty$-Harmonic maps have no interior gradient zeros: either $|Du|>0$ or $|Du|\equiv 0$.
\end{remark}
The next differential identity relates our system \eqref{1.4} with the seemingly different Aronsson PDE system of Capogna-Raich in \cite{CR}. In particular, it follows that even when $n=N$ the PDE system derived in \cite{CR} is only a projection of \eqref{1.4} along $[K_P(Du)]^\top$. Hence, the PDE system in \cite{CR} seems to fail to encapsulate all the information of optimised quasiconformal maps.
\begin{lemma}}\newcommand{\el}{\end{lemma} \label{l6} Let $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^n$ be a local diffeomorphism in $C^1(\Omega)^n$. Then, we have the identity
\begin{equation}}\newcommand{\eeq}{\end{equation}
K_P(Du)\ = \ -\frac{2K(Du)}{n}\left( (Du)^{-1,\top} - n\frac{Du}{|Du|^2}\right)
\eeq
where $K$ and $K_P$ are given by \eqref{1.3} and \eqref{3.1}.
\el
\medskip \noindent \textbf{Proof of Lemma} \ref{l6}. By observing that for any invertible $A\in \mathbb{R}^n \otimes \mathbb{R}^n$ there holds $A^{-1,\top}=A^{\top,-1}$, we have
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.37}
\big(Du^\top \! Du \big)^{-1}\ = \ (Du)^{-1} (Du)^{\top,-1}\ = \ (Du)^{-1} (Du)^{-1,\top}.
\eeq
Thus, we obtain
\begin{align} \label{5.38}
(Du)^{-1,\top} - n\frac{Du}{|Du|^2}\ = & \ -\frac{n}{|Du|^2} \left( Du - \frac{|Du|^2}{n} (Du)^{-1,\top}
\right) \nonumber \\
= & \ -\frac{n}{|Du|^2} \left( Du - \frac{|Du|^2}{n} Du(Du)^{-1} (Du)^{-1,\top} \right) \\
= & \ -\frac{n}{|Du|^2} Du \left(I - \frac{|Du|^2}{n} (Du)^{-1}(Du)^{-1,\top} \right). \nonumber
\end{align}
Consequently, by \eqref{5.37} and \eqref{5.38}, we obtain
\begin{align}
-\frac{|Du|^2}{n}\left( (Du)^{-1,\top} - n\frac{Du}{|Du|^2} \right)\ = & \ Du \left( I - \frac{|Du|^2}{n}\big(Du^\top \!Du \big)^{-1} \right) \nonumber\\
= & \ Du\, \big(Du^\top \!Du \big)^{-1}\left( Du^\top \!Du - \frac{|Du|^2}{n}I \right).
\end{align}
Hence, by \eqref{3.1} and \eqref{1.3} we have
\begin{align}
-\frac{2K(Du)}{n}\left( (Du)^{-1,\top} - n\frac{Du}{|Du|^2} \right)\
= & \ 2Du\, \big(Du^\top \!Du \big)^{-1} \left( \frac{ Du^\top \!Du - \frac{|Du|^2}{n}I }{\det\big(Du^\top \!Du \big)^{1/n} }\right) \nonumber\\
=&\ K_P(Du).
\end{align}
The desired identity follows. \qed
\section{Variational Structure of Optimal $\infty$-Quasiconformal Immersions.} \label{section5}
We begin by introducing a minimality notion of vector-valued Calculus of Variations in $L^\infty$ for the supremal dilation functional \eqref{1.2}. Let $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ be an immersion in $C^1(\Omega)^N$. In view of \eqref{3.1}, we have the identity
\begin{equation}}\newcommand{\eeq}{\end{equation}
K_P(Du)\ = \ \Bigg(2\frac{Du \big(Du^\top\! Du \big)^{-1}}{\det\big(Du^\top\! Du\big)^{1/n}} \Bigg) S\big(Du^\top\! Du \big).
\eeq
Generally, the rank of $K_P(Du)$ may not be constant throughout $\Omega$, although by assumption $\textrm{rk}(Du)= \textrm{rk}(Du^\top\! Du) \equiv n$, because possibly $\textrm{rk}(S(Du^\top\! Du))<n$ on certain regions of $\Omega$. We set
\begin{align} \label{5.1a}
\Omega_k \ :=\ \textrm{int}\Big\{ \textrm{rk}\big(S(Du^\top\! Du)\big) \, = \, k \Big\}\ , \ \ \ k\, = \, 0,\, 1\, , ....\, , \, n,
\end{align}
where $``\textrm{int}"$ denotes topological interior. The $n+1$ open sets $\Omega_k$ are the \emph{``phases''} of the immersion $u$. Their complement in $\Omega$
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.1b}
\S\ :=\ \Omega \setminus \left(\cup_0^n \Omega_k\right)
\eeq
is the set of \emph{``interfaces''} and is closed in $\Omega$ with empty interior. We will also need the \emph{``augmented phases"}
\begin{align} \label{5.3a}
\Omega^*_k \ :=\ \Big\{ \textrm{rk} \big(S(Du^\top\! Du)\big) \, = \, k \Big\}\ , \ \ \ k\, = \, 0,\, 1\, , ....\, , \, n.
\end{align}
Obviously, $\{\Omega_0^*,...,\Omega^*_n\}$ is a partition of $\Omega$ into disjoint phases and $\S$ can be written as $\S=\cup_0^n (\Omega^*_k \setminus \Omega_k)$. The extreme cases of $\Omega^*_0$ and $\Omega^*_n$ are particularly important. $\Omega^*_0$ is the \emph{conformality set} of the immersion and is closed in $\Omega$. Hence,
\begin{equation}}\newcommand{\eeq}{\end{equation}
\Omega^*_0 \ = \ \left\{ Du^\top \! Du =\frac{|Du|^2}{n}I\right\}.
\eeq
Similarly, by Corollary \ref{c7} that follows, if $u$ solves $K_P(Du) \otimes K_P(Du) :D^2u=0$, then $\Omega^*_n$ is the \emph{constant dilation set} of the immersion and coincides with $\Omega_n$:
\begin{equation}}\newcommand{\eeq}{\end{equation}
\Omega^*_n \ = \ \left\{ \frac{|Du|^2}{\det(Du^\top\! Du)^{1/n}}=const.\right\}.
\eeq
If $\Omega_n$ is not connected, then the constants may differ in connected components.
\begin{definition}\label{def1} Let $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ be an immersion in $C^1(\Omega)^N$.
\medskip \noindent (i) We say that $u$ has \emph{Rank-One Locally Minimal Dilation} when for all compactly contained subdomains $D$ of $\Omega$, all functions $f$ over $D$ vanishing on $\partial D$ and all directions $\xi$, $u$ is a minimiser on $D$ with respect to essentially scalar variations $u+f\xi$:
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.2}
\left.
\begin{array}{l}
D \subset \subset \Omega, \\
f\in C^1_0(D), \\
\xi \in \mathbb{S}^{N-1}
\end{array}
\right\} \ \ \Longrightarrow \ \
K_\infty(u,\Omega)\ \leq \ K_\infty(u+f\xi,\Omega).
\eeq
\[
\underset{\text{Figure 2.}}{\includegraphics[scale=0.24]{figure2,1}} \label{fig2}
\]
\noindent (ii) We say that \emph{$u(\Omega)$ has Minimally Distorted Area} when for all compactly contained subdomains $D$ \emph{off the interfaces}, all functions $h$ on $\bar{D}$ (not necessarily vanishing on $\partial D$) and all vector fields $\nu$ along $u$ normal to $K_P(Du)$, $u$ is a minimiser on $D$ with respect to normal free variations $u+h\nu$:
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.3}
\left.
\begin{array}{l}
D \subset \subset \Omega \setminus \S, \\
h\in C^1(\bar{D}), \\
\nu \in \Gamma([K_P(Du)]^\bot)
\end{array}
\right\} \ \ \Longrightarrow \ \
K_\infty(u,\Omega)\ \leq \ K_\infty(u+h \nu,\Omega).
\eeq
\[
\underset{\text{Figure 3.}}{\includegraphics[scale=0.24]{figure3,1}} \label{fig3}
\]
\noindent (iii) We call $u$ a \emph{Minimal $\infty$-Quasiconformal Immersion} when $u$ has Rank-One Locally Minimal Dilation with Minimally Distorted Area of $u(\Omega) \subseteq \mathbb{R}^N$.
\end{definition}
By employing the previous minimality notion, we have the next
\begin{theorem}}\newcommand{\et}{\end{theorem}[Variational Structure of Optimal $\infty$-Quasiconformal Immersions] \label{th1} Let $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ be an immersion in $C^2(\Omega)^N$. Then, if $u$ is Minimal $\infty$-Quasiconformal, it follows that $u$ solves
\begin{align}
K_P(Du) \otimes K_P(Du) :D^2u\ &= \ 0, \ \text{ on }\Omega,\\
[K_P(Du)]^\bot K_{PP}(Du):D^2u\ &= \ 0, \ \text{ on }\Omega\setminus \S, \label{5.9a}
\end{align}
where $\S$ is the set of interfaces of rank discontinuities of $S(Du^\top\! Du)$.
\et
We note that by the results of Section \ref{section6} that follows, in the case $n=2 \leq N$ Theorem \ref{th1} can be strengthened to the following
\begin{corollary}[2-Dimensional Optimal $\infty$-Quasiconformal Immersions] \label{cor11} Let $u : \Omega \subseteq \mathbb{R}^2 \longrightarrow \mathbb{R}^N$ be an immersion in $C^2(\Omega)^N$. If $u$ is Minimal $\infty$-Quasiconformal, it follows that $u$ is Optimal $\infty$-Quasiconformal.
\end{corollary}
The point in Corollary \ref{cor11} is that \eqref{5.9a} is satisfied on $\Omega$ and not only on $\Omega\setminus \S$. Actually, \emph{when $n=2$ then the set of interfaces is empty: $\S=\emptyset$}.
\medskip
The proof of Theorem \ref{th1} is split in two lemmas.
\begin{lemma}}\newcommand{\el}{\end{lemma} \label{l2} Let $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ be an immersion in $C^2(\Omega)^N$. If $u$ has Rank-One Locally Minimal Dilation, then $u$ solves $K_P(Du) \otimes K_P(Du) :D^2u=0$ on $\Omega$.
\el
The proof of Lemma \ref{l2} follows from Theorem 2.1 in \cite{K1} and relates to Lemma 2.3 in \cite{K2}, but we present a simplified, more direct proof for the reader's convenience.
\medskip \noindent \textbf{Proof of Lemma} \ref{l2}. Fix $x\in \Omega$, $0<\varepsilon < \textrm{dist}(x,\partial \Omega)$, $\delta>0$ and $\xi \in \mathbb{S}^{N-1}$. Choose $D:=\mathbb{B}_\varepsilon(x)$ and $f \in C^1_0(D)$ given by
\begin{equation}}\newcommand{\eeq}{\end{equation}
f(z)\ :=\ \frac{1}{2}\big(\varepsilon^2-|z-x|^2\big) .
\eeq
Since $\textrm{rk}(Du)=n$ on $\Omega$ and $Df(z)=-(z-x)$, by restricting $\delta$ sufficiently we obtain that $\textrm{rk}(Du+\delta \xi \otimes Df)=n$ on $\mathbb{B}_\varepsilon(x)$. By Taylor expansions of $K(Du)$ and $K(Du+\delta \xi \otimes Df)$ at $x$ we have
\begin{align} \label{5.7}
K(Du(z))\ = \ K(Du(x))\ +\ D\big(K(Du)\big)(x)^\top(z-x)\ + \ o(|z-x|),
\end{align}
as $z\rightarrow x$, and also by using that $D^2f=-I$ and $Df(x)=0$ we have
\begin{align} \label{5.8}
K\big((Du+\delta\xi \otimes Df)(z)\big)\ & = \ K\big((Du+\delta\xi \otimes Df)(x)\big) \nonumber\\
& \ \ \ \ +\ D\big(K(Du+ \delta\xi \otimes Df)\big)(x)^\top(z-x) \ + \ o(|z-x|)\nonumber\\
& = \ K(Du(x))\ +\ K_P(Du(x))^\top \big(D^2u(x)-\delta \xi\otimes I \big)(z-x) \\
&\ \ \ \ + \ o(|z-x|)\nonumber\\
& = \ K(Du(x))\, + \, \Big(D\big(K(Du)\big)^\top \! - \delta \xi^\top K_P(Du)\Big)(x) (z-x) \nonumber\\
&\ \ \ \ + \ o(|z-x|),\nonumber
\end{align}
as $z\rightarrow x$. By \eqref{5.7} we have the estimate
\begin{align} \label{5.9}
K_\infty \big(u,\mathbb{B}_\varepsilon(x)\big)\ & \geq \ K(Du(x)) \ +\ \max_{\{|z-x|\leq \varepsilon\}}\Big\{D\big(K(Du)\big)(x)^\top(z-x)\Big\}\ + \ o(\varepsilon) \nonumber\\
& = \ K(Du(x))\ +\ \varepsilon\big|D\big(K(Du)\big)(x)\big|\ + \ o(\varepsilon),
\end{align}
as $\varepsilon \rightarrow 0$, and also by \eqref{5.8} we have
\begin{align} \label{5.10}
K_\infty \big(u+\delta f\xi ,\mathbb{B}_\varepsilon(x)\big) \ & \leq \ K(Du(x)) \ + \max_{\{|z-x|\leq \varepsilon\}}\Big\{\Big(D\big(K(Du)\big)^\top \nonumber\\
&\ \ \ \ - \delta \xi^\top K_P(Du)\Big)(x)(z-x)\Big\} \ + \ o(\varepsilon) \\
& = \ K(Du(x))\ +\ \varepsilon\big|D\big(K(Du)\big) - \, \delta \xi^\top K_P(Du)\big|(x)\ + \ o(\varepsilon) , \nonumber
\end{align}
as $\varepsilon \rightarrow 0$. Then, since $u$ has rank-one locally minimal dilation, by \eqref{5.9} and \eqref{5.10} we have
\begin{align} \label{5.11}
0\ & \leq \ K_\infty \big(u+\delta f\xi ,\mathbb{B}_\varepsilon(x)\big) \, - \, K_\infty \big(u,\mathbb{B}_\varepsilon(x)\big) \nonumber\\
&\leq \ \varepsilon\Big(\big|D\big(K(Du)\big) -\, \delta \xi^\top K_P(Du)\big|\ -\ \big|D\big(K(Du)\big)\big|\Big)(x)\ + \ o(\varepsilon),
\end{align}
as $\varepsilon \rightarrow 0$. Suppose first $D\big(K(Du)\big)(x)=0$. Since
\begin{align} \label{5.11a}
K_P(Du) \otimes K_P(Du) : D^2u \ = \ K_P(Du) D \big(K(Du)\big)
\end{align}
we obtain that $\big(K_P(Du) \otimes K_P(Du): D^2u\big)(x) = 0$ as desired. If $D\big(K(Du)\big)(x)\neq0$, then Taylor expansion of the function
\begin{equation}}\newcommand{\eeq}{\end{equation}
p\ \mapsto\ \big|D\big(K(Du)\big)(x) +\, p\big| - \big|D\big(K(Du)\big)(x)\big|
\eeq
at $p_0=0$ and evaluated at $p=-\, \delta \xi^\top K_P(Du(x))$, \eqref{5.11} implies after letting $\varepsilon \rightarrow 0$ that
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.12a}
0\ \leq \ - \delta \, \xi^\top K_P(Du(x))\left(\frac{D\big(K(Du)\big)}{\big|D\big(K(Du)\big)\big|} \right)(x) \ + \ o(\delta).
\eeq
By letting $\delta \rightarrow 0$ in \eqref{5.12a} we obtain $\xi^\top \big(K_P(Du) \otimes K_P(Du): D^2u\big)(x) \leq 0$ for any direction $\xi$. Since $\xi$ and $x$ are arbitrary we get $K_P(Du) \otimes K_P(Du): D^2u = 0$ on $\Omega$. The lemma follows. \qed
\begin{lemma}}\newcommand{\el}{\end{lemma} \label{l3} Let $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ be an immersion in $C^2(\Omega)^N$ with Minimally Distorted Area of $u(\Omega)$. Then, $u$ solves $[K_P(Du)]^\bot K_{PP}(Du) : D^2u=0$ on $\Omega \setminus \S$.
\el
\medskip \noindent \textbf{Proof of Lemma} \ref{l3}. Fix $x\in \Omega \setminus \S$. Then, $x$ belongs to some phase $\Omega_k$ of constant rank and $\textrm{rk}\big(S(Du^\top\! Du)\big)\equiv k$ thereon. We choose $0<\varepsilon<\frac{1}{2}\textrm{dist}(x,\partial \Omega_k)$ and $0<\delta<1$. By the Rank Theorem (see e.g.\ \cite{N}) and application of the Gram-Schmidt procedure to a local frame field adapted to the immersion near $u(x)$, we can construct a local frame of sections $\{\nu^1,...,\nu^{N-k}\}$ spanning $\Gamma([K_P(Du)]^\bot,\mathbb{B}_{2\varepsilon}(x))$ for $\varepsilon$ small enough. Let $\nu$ be a linear combination of these sections and choose an $h \in C^1\big(\overline{\mathbb{B}_\varepsilon(x)} \big)$. Since $\textrm{rk}(Du)=n$ on $\Omega$, by restricting $\delta$ sufficiently we obtain $\textrm{rk}\big(D(u+\delta h\nu)\big)=n$ on $\mathbb{B}_\varepsilon(x)$. By differentiating $\nu^\top K_P(Du) = 0$ we obtain
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.13}
D_k\nu_\alpha K_{P_{\alpha i}}(Du)\ = \ -\nu_\alpha K_{P_{\alpha i}P_{\beta j}}(Du) D_{kj}^2u_{\beta}
\eeq
and by putting $i=k$ and summing, we get
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.14}
D_i\nu_\alpha K_{P_{\alpha i}}(Du)\ = \ -\nu_\alpha K_{P_{\alpha i}P_{\beta j}}(Du) D_{ij}^2u_{\beta}
\eeq
that is
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.14a}
D\nu : K_P(Du) \ =\ -\nu^\top K_{PP}(Du) : D^2u.
\eeq
By Taylor expansion of the dilation and usage of $\nu^\top K_P(Du)=0$, we obtain
\begin{align} \label{5.16}
K\big(D(u+\delta h\nu)\big)\ &=\ K(Du)\ + \ K_P\big(Du):D(\delta h\nu) \ + \ o(\delta|h\nu|) \nonumber\\
&=\ K(Du)\ + \ \delta K_P\big(Du): \big(hD\nu \, +\, \nu \otimes Dh \big) \ + \ o(\delta) \\
&=\ K(Du)\ + \ \delta \Big(hD\nu: K_P\big(Du) \, +\, \nu^\top K_P\big(Du) Dh\Big)\ + \ o(\delta) \nonumber\\
&=\ K(Du)\ + \ \delta hD\nu: K_P\big(Du)\ + \ o(\delta) \nonumber
\end{align}
as $\delta \rightarrow 0$. By \eqref{5.16} and \eqref{5.14a} we have
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.18}
K\big(D(u+\delta h\nu)\big)\ = \ \ K(Du) \ - \ 2\delta h\big(\nu^\top K_{PP}(Du) : D^2u\big) \ + \ o(\delta) ,
\eeq
as $\delta \rightarrow 0$. Hence, since $u(\Omega)$ has minimally distorted area, by \eqref{5.18} we have
\begin{align} \label{5.19}
K_\infty \big( u, \mathbb{B}_\varepsilon(x)\big)\ &\leq \ K_\infty \big( u+\delta h\nu, \mathbb{B}_\varepsilon(x)\big) \nonumber\\
&=\ \sup_{\mathbb{B}_\varepsilon(x)} \Big\{ K(Du) \ - \ 2\delta h \big(\nu^\top K_{PP}(Du) : D^2u\big)\ + \ o(\delta) \Big\}
\end{align}
as $\delta \rightarrow 0$, which gives
\begin{align} \label{5.20}
K_\infty \big( u, \mathbb{B}_\varepsilon(x)\big)\ &\leq \sup_{\mathbb{B}_\varepsilon(x)} K(Du)\ -\ 2\delta\min_{\overline{\mathbb{B}_\varepsilon(x)}} \Big\{ h \big(\nu^\top K_{PP}(Du) : D^2u\big) \Big\} \ + \ o(\delta) \nonumber\\
&= K_\infty \big( u, \mathbb{B}_\varepsilon(x)\big)\ -\ 2\delta\min_{\overline{\mathbb{B}_\varepsilon(x)}} \Big\{ h \big(\nu^\top K_{PP}(Du) : D^2u\big) \Big\} \ + \ o(\delta) .
\end{align}
Hence, by passing to the limit as $\delta \rightarrow 0$, \eqref{5.20} gives
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.21}
\min_{\overline{\mathbb{B}_\varepsilon(x)}} \Big\{ h\big(\nu^\top K_{PP}(Du) : D^2u\big) \Big\}\ \leq \ 0.
\eeq
We now choose as $h$ the constant function
\begin{equation}}\newcommand{\eeq}{\end{equation}
h\ :=\ \textrm{sgn} \left( \nu^\top K_{PP}(Du) : D^2u \right)(x)
\eeq
and by \eqref{5.21} as $\varepsilon \rightarrow 0$ we get $\big|\nu^\top K_{PP}(Du) : D^2u\big|(x)=0$. Since $\nu$ is an arbitrary normal section and $x$ is an arbitrary point on $\Omega \setminus \S$, we get $([K_P]^\bot K_{PP})(Du) : D^2u=0$ on $\Omega \setminus \S$ and the lemma follows. \qed
\section{Geometric Properties of Optimal $\infty$-Quasiconformal Immersions.} \label{section6}
\subsection{Geometric Form of the PDE System.} \label{subsection6.1} In this subsection we show that system \eqref{1.1} decouples to two systems, one normal to the other, which can be written in a geometric, coordinate-free fashion, at least within the phases of solutions whereon the coefficients of the system are continuous.
\begin{proposition} \label{l5} Let $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ be an immersion in $C^2(\Omega)^N$. If $K$ is the dilation \eqref{1.3} and its derivatives are given by \eqref{3.1} and \eqref{3.1A}, then the Aronsson system
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.25}
Q_\infty u \ =\ \Big(K_P \otimes K_P + [K_P]^\bot K_{PP}\Big)(Du):D^2u\ = \ 0
\eeq
is equivalent on each phase $\Omega_k = \textrm{int}\{\textrm{rk}(S(Du^\top\! Du)) = k\}$ to the pair of systems
\begin{align}
S(\bold{G})D\big(\textrm{tr}(\bold{G})\big)\ = \ 0, \label{5.26}\\
\mathbb{B}^\bot : \big(\textrm{tr}(\bold{G})\big)_{P} \ = \ 0, \label{5.27}
\end{align}
where $\bold{G}$ is given by \eqref{1.1}, $g=Du^\top\! Du$ is the Riemannian metric on $u(\Omega)$, $S$ is the Ahlfors operator and $\mathbb{B}^\bot$ is the ``generalized 2nd fundamental form'', defined for every local normal section $\nu \in \Gamma([K_P(Du)]^\bot,D)$ over $D\subseteq \Omega\setminus \S$ as $(\mathbb{B}^\bot) _\nu:= D\nu$. Moreover, \eqref{5.26} is valid on all of $\Omega$.
\end{proposition}
We observe that system \eqref{5.26} can also be written as
\begin{equation}}\newcommand{\eeq}{\end{equation}
S(g)D\left(\frac{\textrm{tr}(g)}{\det(g)^{1/n}}\right)\ = \ 0
\eeq
and hence depends only on the metric structure of the immersion. System \eqref{5.26} is the ``tangential system''. On the other hand, \eqref{5.27} can be written also as
\begin{equation}}\newcommand{\eeq}{\end{equation}
\mathbb{B}^\bot :\left(\frac{\textrm{tr}(g)}{\det(g)^{1/n}}\right)_P \ = \ 0
\eeq
and depends on the exterior geometry as well, the ``shape'' of $u(\Omega)$. System \eqref{5.27} is the ``normal system''.
\medskip \noindent \textbf{Proof of Proposition} \ref{l5}. By applying the orthogonal projections \eqref{4.3} and \eqref{4.4} to \eqref{5.25}, we decouple it to
\begin{align}
K_P(Du) \otimes K_P(Du) :D^2u\ = \ 0, \label{5.30}\\
[K_P(Du)]^\bot K_{PP}(Du) :D^2u\ = \ 0. \label{5.31}
\end{align}
In view of \eqref{3.1}, we rewrite \eqref{5.30} as
\begin{equation}}\newcommand{\eeq}{\end{equation}
Dug^{-1}S(g)D\big(K(Du)\big)\ = \ 0.
\eeq
By using that $K(Du)=\textrm{tr}(\bold{G})$ and that $Dug^{-1}$ has constant rank equal to $n$ and hence is left invertible, we obtain
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.33}
\big(Dug^{-1}\big)^{-1}Dug^{-1}S(g)D\big(\textrm{tr}(\bold{G})\big)\ = \ S(g)D\big(\textrm{tr}(\bold{G})\big)\ = \ 0.
\eeq
Since $g=\det(g)^{1/n}\bold{G}$, system \eqref{5.33} leads to \eqref{5.26}. To obtain \eqref{5.27}, we observe that \eqref{5.31} is equivalent to
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.34}
\nu^\top K_{PP}(Du) :D^2u\ = \ 0,
\eeq
for all local normal sections $\nu \in \Gamma([K_P(Du)]^\bot,D)$, $D\subseteq \Omega \setminus \S$. By \eqref{5.14a}, equation \eqref{5.34} is equivalent to $- D\nu:K_P(Du)= 0$. Hence, we rewrite it as
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.35}
- D\nu:\big(\textrm{tr}(\bold{G})\big)_P\ = \ 0.
\eeq
By definition of $\mathbb{B}^\bot$, system \eqref{5.35} leads to \eqref{5.27} and the proposition follows. \qed
\begin{remark} We will later show that the 2-dimensional case $n=2\leq N$ is prominent. In this case, interfaces of discontinuities of the coefficients disappear and $\mathbb{B}^\bot$ coincides with the standard 2nd fundamental form.
\end{remark}
\begin{corollary}[Constant dilation on $\Omega_n$] \label{c7} Let $u :\Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ be an immersion in $C^2(\Omega)^N$ solving $K_P(Du) \otimes K_P(Du):D^2u=0$. Then, on the $n$-phase $\Omega_n$ given by \eqref{5.1a}, $u$ has constant dilation on each connected component of $\Omega_n$.
\end{corollary}
\medskip \noindent \textbf{Proof of Corollary} \ref{c7}. By \eqref{5.1a} and \eqref{5.33}, we have that $S(g)$ is invertible on $\Omega_n$ and consequently we get $D\big(K(Du)\big)=0$ on $\Omega_n$.
\qed
\subsection{A Geometric Property of Interfaces of Solutions.} \label{subsection6.2} We begin with a differential identity valid \emph{on the interfaces} of discontinuity, under a local regularity assumption on the interface. We assume only $C^1$ regularity, but we allow for possibly complicated topology and self-intersections.
\begin{proposition}[Covariant Derivatives on Interfaces] \label{pr3} Let $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ be an immersion in $C^2(\Omega)^N$. Suppose the set of interfaces $\S$ inside $\Omega$ given by \eqref{5.1b} contains a $C^1$ immersed submanifold $M$ and let $\nabla^M$ be its Riemannian gradient. Then, we have the identity
\begin{align} \label{5.41}
\nabla^M\big([K_P(Du)]^\bot\big): K_P(Du)\ =& \ -\big([K_P]^\bot K_{PP}\big)(Du):D^2u\nonumber\\
&\ +\ \big([K_P]^\bot K_{PP}\big)(Du) : \nabla^{M^\bot}\! Du ,
\end{align}
valid on $M\subseteq \S$, where $\nabla^{M^\bot}\! $ is the orthogonal complement of $\nabla^M$ in $\mathbb{R}^n$.
\end{proposition}
\[
\underset{\text{Figure 4.}}{\includegraphics[scale=0.24]{figure4,1}} \label{fig4}
\]
\begin{remark} The point in \eqref{5.41} is that $[K_P(Du)]^\bot$ has covariantly differentiable contraction with $K_P(Du)$ along (part of the interface) $M$, \emph{without having assumed that $S(Du^\top\! Du)$ has constant rank on $M$} and hence without having assumed that $[K_P(Du)]^\bot$ is differentiable on $M\subseteq \Omega$.
\end{remark}
\medskip \noindent \textbf{Proof of Proposition} \ref{pr3}. By assuming as we can that $M$ is immersed by the inclusion into $\Omega$, we fix a point $p \in M \subseteq \Omega$ and consider coordinates near $p$ adapted to the immersion. Let $\{\nabla^M_1,...,\nabla^M_n\}$ denote the $n$ components of $\nabla^M$ with respect to the standard coordinates of $\mathbb{R}^n$. By differentiating covariantly near $p$ the identity
\begin{equation}}\newcommand{\eeq}{\end{equation}
[K_P(Du)]^\bot_{\alpha \beta} K_{P_{\beta j}}(Du)\ =\ 0
\eeq
we obtain
\begin{align} \label{5.43}
\nabla^M_i\big( [K_P(Du)]_{\alpha \beta}^\bot\big)K_{P_{\beta j}}(Du)\ &=- \ [K_P(Du)]_{\alpha \beta}^\bot \nabla^M_i \big(K_{P_{\beta j}}(Du)\big) \nonumber\\
&=- \ [K_P(Du)]_{\alpha \beta}^\bot K_{P_{\beta j}P_{\gamma k}}(Du)\nabla^M_i D_ku_\gamma.
\end{align}
By applying the expansion $\nabla^M = D - \nabla^{M^\bot}$, putting $i=j$ and summing, \eqref{5.43} implies \eqref{5.41} and the proposition follows. \qed
The previous identity readily implies the next
\begin{corollary} \label{c6} In the setting of Proposition \ref{pr3} above, if $u$ solves the system $([K_P]^\bot K_{PP})(Du) :D^2u=0$, then we have
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{5.45}
\nabla^M\big([K_P(Du)]^\bot\big): K_P(Du) \ =\ \big( [K_P]^\bot K_{PP}\big)(Du) :\nabla^{M^\bot}\! Du.
\eeq
In particular, the vector field
\begin{equation}}\newcommand{\eeq}{\end{equation}
\nabla^M\big([K_P(Du)]^\bot\big):K_P(Du)\ :\ M \longrightarrow \mathbb{R}^N
\eeq
is ``normal'' to $u(M)$, namely, it is valued in $[K_P(Du)]^\bot$:
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{6.18}
[K_P(Du)]^\top \Big(\nabla^M\big([K_P(Du)]^\bot\big):K_P(Du)\Big) \ = \ 0.
\eeq
\end{corollary}
\medskip \noindent \textbf{Proof of Corollary} \ref{c6}. Since the immersion $u$ solves $[K_P(Du)]^\bot K_{PP}(Du):D^2u=0$, \eqref{5.41} gives \eqref{5.45}. By applying the projection $[K_P(Du)]^\top$ to the latter, \eqref{6.18} follows. Hence, the vector field $\nabla^M\big([K_P(Du)]^\bot\big):K_P(Du)$ equals its projection on $[K_P(Du)]^\bot$ and the corollary follows. \qed
\medskip
\section{Sufficiency of $K_P(Du) \otimes K_P(Du) : D^2u =0$ for Rank-One Locally Minimal Dilation When $n=2\leq N$.} \label{section7}
In this section we show that in the case of 2-dimensional immersions when $n=2\leq N$, the tangential system $K_P(Du) \otimes K_P(Du) :D^2u=0$ is sufficient for the minimality notion of Rank-One Locally Minimal Dilation. This follows as a corollary of the fact that when $n=2$, solutions of this system necessarily have constant dilation. In particular, the rank of $S(Du^\top \!Du)$ is constant throughout the domain and interfaces of discontinuity on the coefficients of the normal system $([K_P]^\bot K_{PP})(Du):D^2u=0$ disappear.
As a corollary, we show that when $n=N=2$, the conjecture of Capogna-Raich in \cite{CR} on the sufficiency of system $(K_P\otimes K_P)(Du) :D^2u=0$ for their stronger local minimality notion is false. This follows by Example \ref{ex1} below in which we construct a diffeomorphism with constant dilation on a domain of the plane which has the same boundary values with the identity.
\begin{lemma}[Constant dilation] \label{p7} Let $u : \Omega \subseteq \mathbb{R}^n \longrightarrow \mathbb{R}^N$ be an immersion in $C^2(\Omega)^N$ which solves $K_P(Du) \otimes K_P(Du) :D^2u=0$ on $\Omega$. Suppose $\Omega$ is connected and let $\Omega^*_0,..., \Omega^*_n$ be the augmented $n+1$ phases of the immersion given by \eqref{5.3a}. Then:
\noindent (i) $S(Du^\top\!Du)$ has nowhere rank equal to one:
\begin{equation}}\newcommand{\eeq}{\end{equation}
\Omega^*_1\, =\ \emptyset.
\eeq
\noindent (ii)
If moreover $n=2$, then $\Omega^*_0 \in \{\emptyset, \Omega\}$. That is, $\Omega^*_0$ is either empty or equals the whole $\Omega$. Hence, $u$ has constant dilation everywhere on $\Omega$:
\begin{equation}}\newcommand{\eeq}{\end{equation}
K(Du)\ \equiv \ k \ \geq \ 2.
\eeq
If it happens that $\Omega^*_0\neq \emptyset$, then $k=2$ and in this case $u$ is conformal on $\Omega$.
\end{lemma}
\medskip \noindent \textbf{Proof of Lemma} \ref{p7}. $(i)$ On $\Omega^*_1$ we have $\textrm{rk}(S(Du^\top \!Du))=1$ and also $S(Du^\top \!Du)=S(Du^\top \!Du)^\top$. Since $S(Du^\top \!Du)$ is a rank-one symmetric matrix, there exist $\lambda : \Omega^*_1 \longrightarrow \mathbb{R}$ and $a : \Omega^*_1 \longrightarrow \mathbb{R}^n$ such that $\lambda>0$, $|a|=1$ and $S(Du^\top \!Du)= \lambda\, a \otimes a$. Hence, we obtain
\begin{align}
\lambda\ = \ \lambda \, |a|^2 \ = \ \textrm{tr}(\lambda \, a\otimes a)\ =\ \textrm{tr} \big(S(Du^\top \!Du)\big)\ = \ 0.
\end{align}
Consequently, $\Omega^*_1 = \emptyset$.
$(ii)$ When $n=2$, by $(i)$ we have that $\Omega =\Omega^*_0 \cup \Omega^*_2$. On $\Omega^*_0$ the immersion $u$ is conformal. By Corollary \ref{c7}, on $\Omega^*_2$ $u$ has constant dilation. Hence, $u$ has constant dilation on each connected component of $\Omega^*_0 \cup \Omega^*_2=\Omega$. This means that $K(Du)$ is piecewise constant on $\Omega$. By assumption, $\Omega$ is connected and also $K(Du) \in C^0(\Omega)$. As a result, necessarily either $\Omega^*_0 =\emptyset$ or $\Omega^*_0=\Omega$. If $\Omega^*_0 \neq \emptyset$, then $u$ is conformal on $\Omega$. The lemma follows. \qed
\medskip
\begin{proposition}[Equivalences in the 2-Dimensional case] \label{c8} Let $u : \Omega \subseteq \mathbb{R}^2 \longrightarrow \mathbb{R}^N$ be an immersion in $C^2(\Omega)^N$. Then, the following are equivalent:
\medskip
\noindent (i) $u$ has Rank-One Locally Minimal Dilation on $\Omega$.
\medskip
\noindent (ii) $u$ solves $K_P(Du) \otimes K_P(Du) :D^2u=0$ on $\Omega$.
\medskip
\noindent (iii) $u$ has constant dilation on connected components of $\Omega.$
\end{proposition}
\medskip \noindent \textbf{Proof of Proposition} \ref{c8}. The implications $(i)\Rightarrow (ii)$ and $(ii)\Rightarrow (iii)$ have already been established, so it suffices to prove $(iii)\Rightarrow (i)$. To this end, suppose $u$ has constant dilation on connected components of $\Omega$. Fix $D\subset \subset \Omega$, $f\in C^1_0(D)$ and $\xi \in \mathbb{S}^{N-1}$. We may assume $D$ is connected and that $\textrm{rk}(Du+\xi \otimes Df)=n$ on $D$. Then, since $f|_{\partial D}\equiv 0$, there exists an interior critical point $\bar{x}\in D$ of $f$. By using that $Df(\bar{x})=0$, we estimate
\begin{align}
K_\infty (u+f\xi, D)\ &= \ \sup_D K\big(Du \, +\, \xi \otimes Df\big) \nonumber\\
& \geq \ K\big(Du(\bar{x})\, +\, \xi\otimes Df(\bar{x})\big) \nonumber\\
&=\ K(Du(\bar{x})) \\
&=\ \sup_D K(Du) \nonumber\\
&=\ K_\infty(u,D). \nonumber
\end{align}
Hence, $u$ has rank-one locally minimal dilation and the proposition follows.
\qed
\medskip
Directly from Proposition \ref{c8} we obtain the following
\begin{corollary}[Absence of Interfaces in the 2-Dimensional case] \label{c9} Let $u : \Omega \subseteq \mathbb{R}^2 \longrightarrow \mathbb{R}^N$ be an immersion in $C^2(\Omega)^N$ which solves $Q_\infty u=0$ on the connected set $\Omega$. Then the rank of $S(Du^\top\!Du)$ is constant on $\Omega$, and equals either 0 or 2. If $\textrm{rk}\big(S(Du^\top\!Du)\big)=0$ then $u$ satisfies
\begin{align}
&K(Du)\ \equiv \ 2, \\
&K_{PP}(Du) :D^2u\ = \ 0.
\end{align}
The condition $K(Du) \equiv 2$ is equivalent to Conformality: $Du^\top\! Du=\frac{1}{n}|Du|^2I $. If $\textrm{rk}\big(S(Du^\top\!Du)\big)=2$, then $u$ satisfies
\begin{align}
&K(Du)\ \equiv \ const.\ > \ 2, \\
&[Du]^\bot K_{PP}(Du) :D^2u\ = \ 0.
\end{align}
\end{corollary}
\begin{remark} Since the dilation \eqref{1.3} fails to be convex, it seems that sufficiency of the normal system $[K_P(Du)]^\bot K_{PP}(Du) :D^2u=0$ for minimally distorted area does not hold. In particular, the respective convexity arguments used in the case of the $\infty$-Laplacian in \cite{K2} fail.
\end{remark}
The following example certifies that the variational notion of rank-one locally minimal dilation is genuinely weaker than the respective notion of ``locally minimal dilation" used in \cite{CR}, where general vector-valued variations with the same boundary values are considered.
\begin{example}[Rank-One Locally Minimal Dilation is Strictly Weaker Notion] \label{ex1} (cf. \cite{CR}, Cor 1.6(2))
Let $\Omega:= \mathbb{D}^2\setminus\{0\}\subseteq \mathbb{R}^2$ be the punctured unit disc on the plane. Fix $\gamma>-1$ and consider the maps $u,u^\gamma : \Omega \longrightarrow \Omega$ where $u(x):=x$ and $u^\gamma(x):=|x|^\gamma x$. Then, $u=u^\gamma$ on $\partial \Omega = \mathbb{S}^1 \cup \{0\}$ and $u$ is conformal on $\Omega$ while $u^\gamma$ is quasiconformal but has constant strictly greater dilation:
\begin{equation}}\newcommand{\eeq}{\end{equation}
K(Du)\ \equiv \ 2 \ < \ 2\, +\, \frac{\gamma^2}{\gamma +1}\ \equiv \ K(Du^\gamma).
\eeq
For completeness, we provide some details of our calculations. We readily have
\begin{equation}}\newcommand{\eeq}{\end{equation}
Du^\gamma(x)\ =\ |x|^\gamma\Big(I+\gamma \frac{x}{|x|} \otimes \frac{x}{|x|} \Big)
\eeq
and by setting $\frac{x}{|x|}=(a,b)^\top$ we obtain
\begin{equation}}\newcommand{\eeq}{\end{equation}
Du^\gamma(x)\ = \ |x|^\gamma
\left[
\begin{array}{cc}
1+\gamma a^2 & \gamma a b\\
\gamma ba & 1+\gamma b^2
\end{array}
\right] .
\eeq
By using that $a^2 + b^2=1$, we have
\begin{align}
K(Du^\gamma)\ &= \ \frac{|Du^\gamma|^2}{(\det(Du^\gamma)\det(Du^\gamma))^{1/2}} \nonumber\\
&=\ \frac{|x|^{2\gamma} \big[(1\, +\, \gamma a^2)^2 \, +\, (1+\gamma b^2)^2 \,+ \, 2(\gamma ab)^2 \big]}{|x|^{2\gamma} \big[ (1\, +\, \gamma a^2)(1+\gamma b^2)-(\gamma ab)^2 \big]}\\
&= \ 2\, +\, \frac{\gamma^2}{\gamma +1} . \nonumber
\end{align}
As a conclusion, in view of Proposition \ref{c8}, $u^\gamma$ has rank-one minimal dilation over $\Omega$, but does not have minimal dilation over $\Omega$ since it has the same boundary values on $\partial \Omega$ with a conformal map. If moreover $\gamma >0$, then both $u,u^\gamma$ are in $C^1(\overline{\Omega})^2$.
\end{example}
\subsection{On the sufficiency of $K_P(Du)\otimes K_P(Du) :D^2u=0$ for rank-one locally minimal dilation in the case of dimensions $3\leq n\leq N$.}
In this subsection we loosely discuss the much more complicated case of dimensions $n\geq 3$. In this case results are less sharp since Lemma \ref{p7} generally fails when $n>2$.
To begin with, let $u : \Omega \subseteq \mathbb{R}^3 \longrightarrow \mathbb{R}^N$ be an immersion in $C^2(\Omega)^N$. Obviously, we have $\textrm{rk}(Du)=3\leq N$. By Lemma \ref{l1} and Proposition \ref{l5}, we may rewrite system $K_P(Du)\otimes K_P(Du) :D^2u=0$ as
\begin{equation}}\newcommand{\eeq}{\end{equation} \label{6.15}
g^{-1}S(g)D\big(K(Du)\big)\ = \ 0,
\eeq
where $g= Du^\top\! Du$. We recall that in the case of $n=2$, Lemma \ref{p7} asserts that $S(g)$ either has two nonzero opposite eigenvalues (and hence has a saddle structure), or it vanishes. In the two-dimensional case this covers all possible values of rank and it follows that the dilation is constant throughout connected domains.
When $n=3$, Lemma \ref{p7} still works with the same proof, but now asserts only that
\medskip
\noindent(i) there is no one-dimensional phase $\Omega^*_1$, and
\medskip
\noindent (ii) $\Omega = \Omega^*_0 \cup \Omega^*_2 \cup \Omega^*_3$ with $K(Du)$ constant on connected components of the set $\Omega^*_0 \cup \Omega^*_3$.
\medskip
\noindent When $n=3$ no information is provided for the two-dimensional phase $\Omega^*_2$. Let us analyse more closely what happens in this case when $\Omega^*_2\neq \emptyset$ and nontrivial interfaces of discontinuities may appear, where $\Omega^*_2 = \{\textrm{rk}(S(g))=2\}$. Let $0< \lambda_1 \leq \lambda_2 \leq \lambda_3 $ be the eigenvalue functions on $\Omega$ of the Riemannian metric $g$. Then, the spectrum of $S(g)$ is
\begin{align} \label{6.16}
\sigma\big(S(g)\big)\ &=\ \sigma(g)\ - \ \frac{\textrm{tr}(g)}{3} \nonumber\\
&= \ \left\{\lambda_1- \frac{\lambda_1 +\lambda_2 +\lambda_3}{3},\, \lambda_2- \frac{\lambda_1 +\lambda_2 +\lambda_3}{3},\, \lambda_3- \frac{\lambda_1 +\lambda_2 +\lambda_3}{3} \right\} \\
&= \ \left\{ \frac{2\lambda_1 -\lambda_2 -\lambda_3}{3},\, \frac{2 \lambda_2 - \lambda_3 - \lambda_1}{3},\, \frac{2\lambda_3 -\lambda_2 -\lambda_1}{3} \right\}. \nonumber
\end{align}
We distinguish the following cases:
(a) $0<\lambda_1=\lambda_2=\lambda_3=:\lambda$. Then, by \eqref{6.16} we have that $S(g)=0$.
(b) $0<\lambda_1=\lambda_2=:\lambda < \lambda_3$. Then, by \eqref{6.16} we have that
\begin{equation}}\newcommand{\eeq}{\end{equation}
\sigma\big(S(g)\big)\ =\ \{-\mu,-\mu,2\mu\}
\eeq
where $\mu := \frac{\lambda_3 - \lambda}{3}>0$. By the Spectral Theorem, there is an orthonormal frame $\{a_1,a_2,a_3\}$ of $\mathbb{R}^3$ such that
\begin{equation}}\newcommand{\eeq}{\end{equation}
S(g)\ = \ -\mu \big(a_1 \otimes a_1 \, +\, a_2\otimes a_2\big) \ +\ 2\mu \, a_3 \otimes a_3
\eeq
and $S(g)$ has rank three.
(c) $0<\lambda_1<\lambda_2 = \lambda_3$. Again as before $S(g)$ has rank three.
(d) $0<\lambda_1<\lambda_2 < \lambda_3$. This is the only case where rank equal to two may appear. Since $\lambda_2 +\lambda_3 >2\lambda_1$ and $\lambda_1 +\lambda_2<2\lambda_3$, we get
\begin{equation}}\newcommand{\eeq}{\end{equation}
\mu_1\, :=\, \frac{2\lambda_1 -\lambda_2 -\lambda_3}{3}\, <\, 0\ , \ \ \ \mu_3\, :=\, \frac{2\lambda_3 -\lambda_2 -\lambda_1}{3} \, >\, 0
\eeq
but it may happen that
\begin{equation}}\newcommand{\eeq}{\end{equation}
\mu_2\, :=\, \frac{2 \lambda_2 - \lambda_3 - \lambda_1}{3}
\eeq
vanishes, like for example in the extremal quasiconformal map $u : \mathbb{R}^3 \longrightarrow \mathbb{R}^3$ given by $u(x,y,z):=(e^x,\sqrt{2}y e^x, \sqrt{3}z e^x)^\top$. We have
\begin{equation}}\newcommand{\eeq}{\end{equation}
Du^\top\! Du\, (x,y,z) \ = \ e^{2x}\left[
\begin{array}{ccc}1 & 0 & 0\\
0 & 2 & 0 \\
0 & 0 & 3
\end{array}
\right]
\eeq
and hence we get $(\lambda_1,\lambda_2,\lambda_3)=(e^{2x},2e^{2x},3e^{2x})$, which implies $\mu_2=0$. Generally, the set of interfaces of a three-dimensional optimal quasiconformal map is given by
\begin{equation}}\newcommand{\eeq}{\end{equation}
\S\ =\ \partial \{ \mu_2 \, =\, 0\}
\eeq
and the two-dimensional phase of $u$ is given by
\begin{equation}}\newcommand{\eeq}{\end{equation}
\Omega_2\ =\ \textrm{int}\{\mu_2 = 0\} .
\eeq
Since $S(g)$ is traceless, on the two-dimensional phase the condition $\mu_2=0$ implies $-\mu_1=\mu_3=:\mu >0$ and hence $\sigma\big(S(g)\big)=\{-\mu,0,\mu\}$. By the Spectral Theorem, there exists an orthonormal frame $\{a,b,c\}$ of $\mathbb{R}^3$ such that
\begin{equation}}\newcommand{\eeq}{\end{equation}
S(g)\ = \ - \mu\, \big(a \otimes a \ -\ c\otimes c\big).
\eeq
By \eqref{6.15}, we have that $D \big(K(Du)\big)$ is perpendicular to $\{a,c\}$ and hence
\begin{equation}}\newcommand{\eeq}{\end{equation}
D \big(K(Du)\big)\ = \ b\otimes b \, D \big(K(Du)\big)
\eeq
which implies that the dilation of $u$ varies only in the direction of $b$. Consequently, $K(Du)$ depends only on $b$ through a certain function $k$:
\begin{equation}}\newcommand{\eeq}{\end{equation}
K\big(Du(x)\big)\ = \ k\big(b(x)\big).
\eeq
Unlike the case $n=2$, when $n=3$ we do \emph{not} obtain that the dilation of three-dimensional optimal quasiconformal immersions is constant, at least not by the previous reasoning.
However, by Theorem \ref{th1} in all dimensions $2\leq n \leq N$ rank-one locally minimal dilation implies solvability of $K_P(Du)\otimes K_P(Du) :D^2u=0$ and by the higher-dimensional extension of Example \ref{ex1}, rank-one locally minimal dilation is genuinely weaker than locally minimal dilation. Although it seems reasonable that $K_P(Du)\otimes K_P(Du):D^2u=0$ is sufficient for rank-one locally minimal dilation, we cannot definitively conclude the validity of the conjecture of Capogna-Raich in \cite{CR} for $n\geq 3$.
\medskip
\noindent \textbf{Acknowledgement.} I am indebted to L.\ Capogna, J.\ Manfredi and Y.\ Yu for their interest in the author's work, their encouragement and their constructive suggestions.
\bibliographystyle{amsplain}
|
{
"timestamp": "2013-07-17T02:07:38",
"yymm": "1206",
"arxiv_id": "1206.6039",
"language": "en",
"url": "https://arxiv.org/abs/1206.6039"
}
|
\section{Introduction}
M5-brane is one of the most mysterious objects in M-theory \cite{Hull:1994ys}.
M2- and M5-branes, which are two important ingredients of M-theory, are known to support
strange numbers of light degrees of freedom on their worldvolumes \cite{Klebanov:1996un}.
Although the $N^{3/2}$ scalings for $N$ coincident M2-branes have been recently understood
in some detail \cite{Aharony:2008ug,Drukker:2010nc}, the $N^3$
scalings for $N$ M5-branes are not very solidly understood in a microscopic way.
M-theory is related to 10d string theories by having an extra direction emerging in strongly
coupled string theories \cite{Hull:1994ys}, being a circle for the type IIA strings.
This relation is mainly supported by identifying D0-brane states with the Kaluza-Klein states
of M-theory along the circle. Such a relation could still hold in
Euclidean type IIA/M-theories on various curved manifolds with a circle factor.
The relation between type IIA/M-theories via a circle compactification also yields
a similar relation between the D4-brane and M5-brane theories. On M5-branes probing
flat transverse space or its $\mathbb{Z}_2$ orbifold, there live 6d $(2,0)$ superconformal
theories associated with $A_n$ or $D_n$ type gauge groups. The full set of known
6d $(2,0)$ theories actually comes in an $ADE$ classification \cite{Witten:1995zh}.
The microscopic details of these
theories are largely unknown. Dimensional reductions of these 6d theories along a small
circle admit descriptions by 5d maximally supersymmetric Yang-Mills theories.
Naively, the resulting 5d theory is supposed to be a dimensional reduction, after which
one expects that information on the 6d physics is lost. There appeared some evidence that
careful studies of the strong-coupling or non-perturbative physics of the 5d theory let us
extract the nontrivial information on the 6d theory compactified on the circle \cite{Aharony:1997th,Douglas:2010iu}. In Minkowskian dynamics, crucial roles are played by
the instanton solitons in the 5d theory, similar to the way in which type IIA D0-branes are
crucial for reconstructing the KK states of the extra circle. In particular, in BPS sectors,
it has been shown in detail that the instanton partition function yields various (expected or
novel) results for 6d $(2,0)$ theory compactified on a circle \cite{Kim:2011mv}: this includes the rigorous proof of the uniqueness of $U(1)$ multi-instanton bound states, discovery of
novel self-dual string bound states which explains some enhancements of degrees of freedom in the Coulomb branch, the study of the symmetric phase instanton index and its agreement with
the DLCQ gravity dual index on $AdS_7\times S^4$.
In this paper, we apply the same idea to the 6d theory on $S^5\times S^1$, and study
them from 5d gauge theories on $S^5$. As the 5d gauge theories (at least apparently) look non-renormalizable, there is a general issue on how to make quantum calculations sensible.
There appeared proposals on possible finiteness of maximally supersymmetric theories in
5d \cite{Douglas:2010iu}. (See also \cite{Lambert:2010wm} for an earlier work.) But even
if this is true, having a good control over all the 5d quantum fluctuations would be
generally difficult. Just as those considered in \cite{Kim:2011mv}, there are many supersymmetric observables which rely less sensitively on quantum fluctuations.
We expect that the BPS observables that we consider in this paper would also be safe:
in fact, based on localization, we are led to consider a supersymmetric path integral
which is secretly Gaussian, for which the UV divergence issue is almost trivial.
So we base our studies on a much more modest but solidly testable proposal that
5d supersymmetric Yang-Mills theory describes 6d $(2,0)$ theory compactified on a
circle at least in the BPS sector. Note that this proposal is not necessarily restricted
to maximal SYM: although we focus on maximal SYM in this paper, we generalize the
study to less supersymmetric theories in a follow up work \cite{Kim:2012qf}.
The $(2,0)$ theory on $S^5\times S^1$ is interesting for various reasons.
Firstly, any 6d CFT on flat spacetime can be put on $S^5\times\mathbb{R}$ by radial
quantization, where $\mathbb{R}$ is the (Euclidean) time direction. Depending on how one
compactifies the time direction to a circle, the resulting partition function will be an
appropriate index which counts BPS states of this theory. In particular, $S^5\times\mathbb{R}$ is the conformal boundary of global $AdS_7$, so that the large $N$ limits (if available)
of these theories could have gravity duals on global $AdS_7$ \cite{Maldacena:1997re}.
AdS$_7$/CFT$_6$ is perhaps the least understood duality among various AdS/CFT proposals,
on which we can shed light with our studies.
When the circle size is small, we are naturally led to study the Euclidean
supersymmetric Yang-Mills theory on the 5-sphere. For the $ADE$ cases, we study the 5d
gauge theories with corresponding gauge groups. For $A_n$ and $D_n$ cases, they can be
understood intuitively as living on `Euclidean D4-branes' wrapping the 5-sphere, if
one reduces the 6d theory on the circle interpreted as the M-theory circle.
We first construct and calculate the partition function of a Yang-Mills quantum field theory
on $S^5$ preserving $16$ real SUSY. To motivate the construction from the 6d $(2,0)$ theory,
we first consider the Abelian 6d $(2,0)$ theory. As this free theory on $\mathbb{R}^6$ is
conformal, one can radially quantize it to obtain a theory on $S^5\times\mathbb{R}$.
The $32$ Killing spinors satisfy one of the two Killing spinor equations:
\begin{equation}
\nabla_M\epsilon=\pm\frac{1}{2r}\Gamma_M\Gamma_\tau\epsilon\ ,
\end{equation}
where $r$ is the radius of $S^5$ and $\tau$ is the Euclidean time.
Since the dependence of $\epsilon$ on $\tau$ is $e^{\pm\frac{1}{2r}\tau}$,
one cannot naively compactify this theory preserving all $32$ SUSY. Instead,
one can introduce an R-symmetry twist (or a Scherk-Schwarz reduction) to obtain a theory
on $S^5\times S^1$ with as much as $16$ SUSY.
This can be done by picking an $SO(2)\subset SO(5)$ R-symmetry. The resulting theory after
the 5d reduction, with tensor-vector dualization, can be straightforwardly generalized to
non-Abelian theories with arbitrary gauge group. Due to the R-symmetry twist, the maximal SYM
on $S^5$ preserves only $SO(2)\times SO(3)$ part of $SO(5)$ R-symmetry.
We calculate and study the partition function of this maximal SYM on $S^5$. We employ
the localization technique to obtain the perturbative contribution given by a simple matrix
integral. We also suggest a simple non-perturbative correction, which is proved in
a follow-up paper \cite{Kim:2012qf}. The M-theory interpretation demands
us to relate the 5d gauge coupling $g_{YM}$ and the circle radius $r_1$ as
\begin{equation}
\frac{4\pi^2}{g_{YM}^2}=\frac{1}{r_1}=\frac{2\pi}{r\beta}\ ,
\end{equation}
where $\beta$ is the (dimensionless) inverse `temperature' like chemical potential. In flat
Minkowskian space, this is relating the instanton (or D0-brane) mass with the Kaluza-Klein
mass on the extra circle. With this interpretation, and also with the R-symmetry twist on
which we elaborate in
section 2, the 5-sphere partition function is identified as an index of the 6d theory with
the chemical potential $\beta$. This index counts BPS states on $S^5\times\mathbb{R}$,
or local BPS operators on $\mathbb{R}^6$.
The fact that our 5d partition function takes the form of an index, with all coefficients
being integers when expanded in the fugacity $e^{-\beta}$, strongly supports that
the 5d Yang-Mills theory is nontrivially capturing the 6d physics.
In the later part of this paper, we mostly consider the $U(N)$ gauge theory in 5d, to
study the $A_{N-1}$ type $(2,0)$ theory in 6d times a decoupled free sector. However, we
comment on some important general features for all $ADE$ gauge groups, and also on possible
fate of the theories with non-$ADE$ gauge groups, including $BCFG$.
Our partition function captures two different features of the 6d theory. Firstly, it tells us
the degeneracy information of the BPS states of the 6d theory. Secondly, and perhaps more
interestingly, it contains the information on the 6d vacuum on $S^5\times\mathbb{R}$.
The unique
vacuum of the radially quantized 6d theory has nonzero Casimir energy. In the large $N$ limit
of the $SU(N)$ and $SO(2N)$ cases, the $AdS_7$ gravity dual predicts its value to be nonzero
and proportional to $N^3$ \cite{Awad:2000aj}. From the gravity side,
this is basically the same $N^3$ appearing in all $AdS_7$ gravity calculations, coming from
$\frac{\ell^5}{G_7}$ combination of the $AdS_7$ radius $\ell$ and 7d Newton constant $G_7$.
Our partition function captures the `index version' of the vacuum Casimir energy, which also
exhibits the $N^3$ scaling in the large $N$ limit. See section 3 and appendix B for what we
mean by the `index Casimir energy.' The difference between the normal Casimir energy of
CFT and ours is that ours uses an unconventional regularization for the Casimir energy,
which is naturally chosen by the definition of the index we consider.
Curiously, the perturbative partition functions of our theories with $16$ SUSY on $S^5$
turn out to take identical forms as the partition functions of pure Chern-Simons theories
on $S^3$, when we appropriately identify the Chern-Simons coupling constant with the 5d
coupling constant.
Upon adding a simple non-perturbative correction to the above perturbative part,
we also show that the $U(N)$ index completely agrees with the supergravity index
on $AdS_7\times S^4$ in the large $N$ limit. Also, our finite $N$ index is a function
which appears in various different physical/mathematical contexts. See section 3.2 for
the details.
We also provide a matrix integral form of the perturbative part of a generalized
partition function, which we suppose to be a more refined 6d index with two chemical
potentials. For this we study a SYM theory on $S^5$ preserving $8$ SUSY, which can be
regarded as a Scherk-Schwarz reduction of the 6d $(2,0)$ theory with more general
$U(1)\subset SO(5)$ embedding. In one limit, we suggest that the generalized partition
function captures the spectrum of half-BPS states of the 6d theory, whose
general structures are explored, for instance, in \cite{Bhattacharyya:2007sa}.
The remaining part of this paper is organized as follows. In section 2, we motivate
our theory on $S^5$ by taking a Scherk-Schwarz reduction of the Abelian
6d $(2,0)$ theory. The resulting 5d theory is generalized to a non-Abelian theory on $S^5$.
In section 3, we calculate the perturbative partition function and show that
it takes the same form as the Chern-Simons partition function on $S^3$. Adding non-perturbative
corrections, we study the index Casimir energy, the large $N$ index and the dual gravity
index. We finally present a matrix integral form of a
generalized partition function which we expect to be a more refined 6d index.
Appendix A explains the scalar/spinor/vector spherical harmonics on $S^5$,
as well as some path integral calculations. Appendix B explains that the superconformal
indices (of which our partition function is a special sort) in various dimensions capture
the index version of Casimir energies and study their properties.
As we were finalizing the preparation of this manuscript, we received
\cite{Kallen-Qiu-Zabzine} which partly overlaps with our section 3.3. Their result is a
special case of ours in section 3.3 with $\Delta=\frac{1}{2}$.
\section{Maximal SYM on the 5-sphere}
\subsection{Motivation from Abelian theories}
As a motivation, we would like to reduce the radially quantized Abelian
$(2,0)$ theory on a circle to obtain a theory on $S^5$ with $16$ SUSY.
The resulting 5d theory will be generalized to non-Abelian
theories in section 2.2.
The $32$ Killing spinors on Minkowskian $S^5\times\mathbb{R}$ satisfy one of
the two equations
\begin{equation}\label{killing}
\nabla_M\epsilon_\pm=\pm\frac{i}{2r}\Gamma_M\Gamma_0\epsilon_\pm\ ,
\end{equation}
where $M=0,1,2,3,4,5$, and $r$ is the radius of $S^5$. Taking $M=0$, one
finds the time dependence
\begin{equation}
\epsilon_\pm(t)=e^{\mp\frac{i}{2r}t}\epsilon_{0\pm}\ .
\end{equation}
The spinors with two signs yield Poincare/conformal supercharges, respectively,
which should be suitably complex conjugate to each other.
We first consider the properties of our spinors in some detail. The matter and
Killing spinors of the 6d $(2,0)$ theory are all spinors in spacetime $SO(5,1)$
(or $SO(6)$ in Euclidean theories) and the $SO(5)_R$ R-symmetry.
The $8\times 8$ gamma matrices in 6d can be written in terms of the $4\times 4$
5d gamma matrices $\gamma_\mu$ (which shall be useful after a circle reduction) as
\begin{equation}
\Gamma_\mu=\gamma_\mu\otimes\sigma_1\ ,\ \ \Gamma_\tau={\bf 1}_4\otimes\sigma_2
\end{equation}
on a Euclidean space. Multiplication of factor $i$ to $\Gamma_\tau$ will convert
it to the Lorentzian gamma matrices.
The 6d chirality matrix $\Gamma^{123456}=i\sigma_3$ demands that a chiral spinor
have $\sigma_3=+1$ eigenvalue. To be concrete, we take
the following representation of the 5d gamma matrices in this paper ($\sigma^{1,2,3}$
are Pauli matrices):
\begin{equation}
\gamma^{1,2,3}=\sigma^{1,2,3}\otimes\sigma^{1}\ ,\ \
\gamma^4={\bf 1}_2\otimes\sigma^2\ ,\ \ \gamma^5=-{\bf 1}_2\otimes\sigma^3\ .
\end{equation}
These satisfy $\gamma^{12345}=1$. Also, for the internal $SO(5)$ spinors, we
introduce the $4\times 4$ gamma matrices $\hat\gamma^I$ ($I=1,2,3,4,5$) as
\begin{equation}\label{internal-gamma}
\hat\gamma^1=\sigma^1\otimes\sigma^1\ ,\ \ \hat\gamma^2=\sigma^2\otimes\sigma^1\ ,\ \
\hat\gamma^4=\sigma^3\otimes\sigma^1\ ,\ \ \hat\gamma^5={\bf 1}_2\otimes\sigma^2\ ,\ \
\hat\gamma^3=\hat\gamma^{1245}=-{\bf 1}_2\otimes\sigma^3\ ,
\end{equation}
which satisfy $\hat\gamma^{12345}=1$.
With the above convention for gamma matrices, one finds (in the Lorentzian case)
\begin{equation}
(\Gamma_M)^T=\left(\Gamma_1,-\Gamma_2,\Gamma_3,-\Gamma_4,\Gamma_5,-\Gamma_0\right)
=\pm C_\pm\Gamma_MC_\pm^{-1}
\end{equation}
with $C_+\sim\Gamma_{135}\sim\gamma_{24}\otimes\sigma^1\equiv C\otimes\sigma^1$ and
$C_-\sim\Gamma_{240}\sim\gamma_{24}\otimes\sigma^2=C\otimes\sigma^2$.
Here, $C$ is the charge conjugation matrix in 5d in our convention.
Killing spinors $\epsilon_\pm$ are
related by a symplectic charge conjugation, using either of $C_\pm$ together with
the $SO(5)_R\sim Sp(4)$ internal charge conjugation $\hat{C}\sim
\hat\gamma^{25}=i\sigma^2\otimes\sigma^3$. Namely, the Killing spinors satisfy
$\epsilon_-^T=\bar\epsilon_+C\otimes\hat{C}$. With the appearance of $\Gamma^0$ in
$\bar\epsilon_+=\epsilon_+^\dag\Gamma^0$, the symplectic charge conjugation with
Lorentzian signature does not flip the 6d chirality. Also, it is easy to see that the
equations (\ref{killing}) for $\epsilon_\pm$ correctly transform into each other by
the above conjugation. So $\epsilon_\pm$ can both be taken to be in the ${\bf 4}$
representation of $SO(6)$, yielding 6d $(2,0)$ SUSY.
On the other hand, in Euclidean 6d, one finds
\begin{equation}
(\Gamma_M)^\ast=\left(\Gamma_1,-\Gamma_2,\Gamma_3,-\Gamma_4,\Gamma_5,-\Gamma_6
\right)=\pm C_\pm\Gamma_MC_\pm^{-1}
\end{equation}
with same $C_\pm$ as in the Lorentzian case. So one may be tempted to relate $\epsilon_\pm$
by a similar symplectic Majorana condition $\epsilon_-\stackrel{?}{=}C\otimes\hat{C}\epsilon_+^\ast$.
This time, the charge conjugation flips the 6d chirality. Also, changing
$\Gamma_0$ on the right hand side of (\ref{killing}) to make it into $\Gamma_6$ along
$\tau$ direction, the $\epsilon_\pm$ equations are no longer related to each other with the above
conjugation. A natural charge conjugation in the radially quantized Euclidean CFT is to accompany
it with the sign flip of $\tau$ \cite{Bhattacharyya:2007sa}, as this is changing particles
into anti-particles. (This is basically remembering the Lorentzian physics via $\tau=it$.)
Also, we multiply $\Gamma_6$ on the charge conjugation matrix to have all matter and Killing
spinors to have same chirality. Combining the charge conjugation with $\tau\rightarrow-\tau$
and a multiplication of $\Gamma_6$, one finds that the Euclidean version of (\ref{killing})
for $\epsilon_\pm$ are related to each other.
Thus, we have $32$ real Killing spinors in both Lorentzian and Euclidean
6d theories, all being chiral.
Now we consider the Euclidean theory with time $\tau$.
Since all Killing spinors depend on $\tau$, naive compactification
on $S^5\times S^1$ breaks all SUSY. To preserve $16$ SUSY, one suitably twists
the theory with an $SO(2)\subset SO(5)$ chemical potential to admit constant spinors
on $S^1$. Namely, taking a ${\bf 5}\rightarrow({\bf 3},{\bf 1})+({\bf 1},{\bf 2})$ decomposition of an $SO(5)\supset SO(3)\times SO(2)$ vector, one takes the $SO(2)$
which rotates ${\bf 2}$ and introduces
the background gauge field which covariantizes
\begin{equation}
\nabla_\tau\rightarrow\ \nabla_\tau+\frac{i}{2r}\hat{\gamma}^{45}\ .
\end{equation}
This will correspond to introducing a chemical potential for the $SO(2)$ R-charge
of the 6d theory, which we shall explain in detail shortly. The $M=6$ component of
the Killing spinor equation then becomes
\begin{equation}
\partial_\tau\epsilon_\pm=\frac{1}{2r}\left(\pm 1-i\hat\gamma^{45}\right)\epsilon_\pm\ .
\end{equation}
So in the case with $\pm$ sign, we take the Killing spinors with $i\hat{\gamma}^{45}=\pm 1$
eigenvalue to obtain $16$ SUSY. The resulting 5d Killing spinors satisfy
\begin{equation}
\nabla_\mu\epsilon_\pm=\mp\frac{1}{2r}\Gamma_\mu\Gamma_\tau\epsilon_\pm
=-\frac{i}{2r}\Gamma_\mu\Gamma_\tau\hat{\gamma}^{45}\epsilon_\pm\ .
\end{equation}
The 5d Killing spinor equation is
thus given by (using $\sigma^3\epsilon_\pm=\epsilon_\pm$)
\begin{equation}\label{killing-5d}
\nabla_\mu\epsilon=\frac{1}{2r}\gamma_\mu\hat{\gamma}^{45}\epsilon\ ,
\end{equation}
which includes both $\epsilon_\pm$ cases.
This is the same as one of the Killing spinor equations studied in \cite{Blau:2000xg}
in 5d (although \cite{Blau:2000xg} discussed Minkowskian Einstein manifold).
In the reduced 5d perspective, we simply take the charge conjugation
$\epsilon_-=C\otimes\hat{C}\epsilon_+^\ast$ without knowing about $\tau$ flip. We
also forget the $\Gamma_6={\bf 1}\otimes\sigma_2$ multiplication by regarding
$\epsilon_\pm$ as 4 component spinors in 5d. $i\hat\gamma^{45}$ transforms under
this 5d charge conjugation as
\begin{equation}
\hat{C}^{-1}(i\hat\gamma^{45})\hat{C}=-(i\hat\gamma^{45})^\ast\ .
\end{equation}
So $C\otimes\hat{C}\epsilon_+^\ast$ has the opposite sign in its $\hat\gamma^{45}$
eigenvalue to $\epsilon_+$, making it possible to identify it as $\epsilon_-$.
To conclude, the spinors $\epsilon$ satisfying (\ref{killing-5d}) can be regarded as
forming a set of $8$ Poincare SUSY Q and $8$ conformal SUSY S in 6d perspective, which
closes into itself under Hermitian conjugation.
These $8$ complex or $16$ real Killing spinors will be the SUSY of our 5d SYM.
As a more general twisting, one can choose different $SO(2)$ embeddings in $SO(5)$,
which generically result in a 5d theory with $8$ preserved SUSY upon circle reduction. One
introduces the twisting which covariantizes
\begin{equation}
\nabla_\tau\ \rightarrow\ \ \nabla_\tau+\frac{i}{2r}\left(\Delta\hat\gamma^{45}
+(1-\Delta)\hat\gamma^{12}\right)
\end{equation}
on spinors, where $\Delta$ is a real constant. By following the discussions
of the last paragraph, one finds that the reduced 5d theory preserves $8$ SUSY,
which satisfies $i\hat\gamma^{45}=i\hat\gamma^{12}=\pm 1$ projection for $\epsilon_\pm$,
respectively.
Now let us capture some key aspects of the 5d Abelian gauge theory obtained by reducing
the 6d free tensor theory on the circle, with the above R-symmetry twist. In the
$r\rightarrow\infty$ limit, we simply get the maximal SYM in 5 dimensions. The coupling
to the background curvature yields various mass terms in the Abelian theory. From the
viewpoint of the 6d theory on $S^5\times S^1$, the mass terms come from
two sources. Firstly, when one radially quantizes the 6d theory, all 5 real scalars
acquire the conformal mass terms with mass $m=\frac{2}{r}$, since the free scalars have
dimension $2$. This yields the 6d mass terms
\begin{equation}
\frac{2}{r^2}(\phi^a)^2+\frac{2}{r^2}(\phi^i)^2
\end{equation}
with $a=1,2,3$, $i=4,5$, in the convention that the kinetic terms are
$\frac{1}{2}(\partial\phi^a)^2+\frac{1}{2}(\partial\phi^i)^2$. In 5d, extra
contributions to the mass terms are induced from the kinetic term with $\tau$
derivatives, since we now have the $SO(2)$ twists. There is no extra contribution
for $\phi^a$, but the $\tau$ derivatives on $\phi^i$ and the fermions $\lambda$ are
twisted as
\begin{eqnarray}
\nabla_\tau\phi^i&\rightarrow&\nabla_\tau\phi^i-\frac{i}{r}\epsilon^{ij}\phi^j\nonumber\\
\nabla_\tau\lambda&\rightarrow&(\nabla_\tau-\frac{i}{2r}\hat\gamma^{45})\lambda\ ,
\end{eqnarray}
respectively. The 6d kinetic terms thus provide extra contribution to the 5d masses
\begin{equation}
\frac{1}{2}(\nabla_\tau\phi^i)^2+\frac{1}{2}\lambda^\dag\nabla_\tau\lambda
+\frac{2}{r^2}(\phi^a)^2+\frac{2}{r^2}(\phi^i)^2\rightarrow\frac{2}{r^2}(\phi^a)^2
+\frac{3}{2r^2}(\phi^i)^2-\frac{i}{4r}\lambda^\dag\hat\gamma^{45}\lambda\ .
\end{equation}
Adding the last scalar and fermion mass terms to the maximal SYM action (with obvious
covariantization with the 5-sphere metric), one is supposed to obtain an Abelian action
which preserves $16$ SUSY. We shall explicitly show that the theory preserves $16$ SUSY
with above masses in section 2.2, with a non-Abelian completion.
The case with general $SO(2)$ embedding can be studied as well. The resulting scalar
and fermion mass terms are given by
\begin{equation}
\frac{4-(1-\Delta)^2}{2r^2}(\phi^a)^2
+\frac{4-\Delta^2}{2r^2}(\phi^i)^2-\frac{i}{4r}\lambda^\dag
\left(\Delta\hat\gamma^{45}+(1-\Delta)\hat\gamma^{12}\right)\lambda\ .
\end{equation}
We shall come back to this version of non-Abelian theory with $8$ SUSY later.
Before proceeding, we illustrate the nature of the 6d partition functions that we expect
our 5d calculations to capture, with the example of 6d Abelian $(2,0)$ theory on
$S^5\times\mathbb{R}$.
Up to global rotations and charge conjugation, the BPS bound given by a chosen pair
of $Q$ and $S$ via $\{Q,S\}$ in 6d is given by
\begin{equation}\label{bound}
\epsilon\geq 2(R_1+R_2)+j_1+j_2+j_3\ ,
\end{equation}
where $R_1$ is the $SO(2)$ R-symmetry we used to twist the time derivative.
$R_2$ is another Cartan of $SO(5)$ in the orthogonal 2-plane basis, and
$j_1,j_2,j_3$ are three $SO(6)$ Cartans, again in the three orthogonal 2-plane basis.
The twist above with $8$ SUSY uses $\Delta R_1\!+\!(1-\Delta)R_2$. There is one
Poincare supercharge $Q$ saturating the above energy bound, which has $R_1=R_2=\frac{1}{2}$,
$j_1=j_2=j_3=-\frac{1}{2}$, $\epsilon=\frac{1}{2}$.
The index which counts BPS states saturating this bound is studied
in \cite{Kinney:2005ej,Bhattacharya:2008zy}. It is defined as
\begin{equation}\label{superconformal}
{\rm Tr}\left[(-1)^Fe^{-\beta^\prime\{Q,S\}}x^{3\epsilon+j_1+j_2+j_3}
y^{R_1-R_2}a^{j_1}b^{j_2}c^{j_3}\right]
\end{equation}
with a constraint $abc=1$. $\beta^\prime$ is the usual regulator in the Witten index.
For the $U(1)$ $(2,0)$ theory,
the full index $Z$ is given by the Plethystic (or multi-particle)
exponential of the letter index $z$ \cite{Bhattacharya:2008zy}
\begin{equation}\label{abelian-index}
z=\frac{x^6(y+y^{-1})-x^8(ab+bc+ca)+x^{12}}{(1-x^4a)(1-x^4b)(1-x^4c)}\ ,\ \
Z=x^{\epsilon_0}\exp\left[\sum_{n=1}^\infty\frac{1}{n}z(x^n,y^n,a^n,b^n,c^n)\right]\ .
\end{equation}
$\epsilon_0$ is the `index version' of the vacuum Casimir energy of the Abelian theory
on $S^5\times\mathbb{R}$. See appendix B. The terms in the numerators can be easily
understood from the BPS fields in the free Abelian tensor multiplet. The first two
terms come from two complex scalars (among $5$ real) taking charges
$\Phi^{(R_1,R_2)}_{(j_1,j_2,j_3)}=\Phi^{(1,0)}_{(0,0,0)}$ and $\Phi^{(0,1)}_{(0,0,0)}$.
The next $3$ terms come from three chiral fermions with charges
$\Psi^{(R_1,R_2)}_{(j_1,j_2,j_3)}=\Psi^{(+,+)}_{(-,+,+)}$, $\Psi^{(+,+)}_{(+,-,+)}$
and $\Psi^{(+,+)}_{(+,+,-)}$, where $\pm$ denote $\pm\frac{1}{2}$. The final term $+x^{12}$
is for a fermionic constraint coming from a component of the Dirac equation which
contains BPS fields and derivatives only,
$(\slash\hspace{-.25cm}\partial\Psi)^{(+,+)}_{(+,+,+)}=0$. The three factors in the
denominator come from acting three holomorphic derivatives to the above BPS fields
and constraints, which have $R_1=R_2=0$ and $(j_1,j_2,j_3)=(1,0,0)$, $(0,1,0)$ and
$(0,0,1)$.
The contribution
$x^{\epsilon_0}$ is normally ignored in the literature on the superconformal index, but
should be there as an overall multiplicative factor in path integral approaches
\cite{Aharony:2003sx}. Of course
the index (\ref{superconformal}) can be defined in non-Abelian theories
with the same $(2,0)$ superconformal algebra.
There are two interesting limits of this general index which we consider in this paper.
Firstly, one can take $x\rightarrow 0$, $y\rightarrow\infty$, keeping $x^6y\equiv q$ fixed.
The letter index $z$ becomes $z=q$ in this limit, yielding
\begin{equation}
Z=\lim_{x\rightarrow 0}(x^{\epsilon_0})\frac{1}{1-q}\ .
\end{equation}
The first factor either goes to zero or infinity. As we explain in appendix B,
the Casimir energy for the 6d $(2,0)$ theory is expected to be negative. In any case,
one normally considers the remaining factor $\frac{1}{1-q}$,
which is the half-BPS partition function which acquires contribution from operators made
of a single complex scalar. Its non-Abelian version for $U(N)$ gauge
group \cite{Bhattacharyya:2007sa} is explained in section 3.3.
Another limit, which is of more interest to us in this paper, is obtained by taking
all but one fugacity variable to be $1$, so that more cancelations are expected to
appear than in the general superconformal index. We call this the unrefined index.
To explain this limit, we start by noting that
the supercharges chosen above commute with $\epsilon-R_1$. The fugacity conjugate to
this charge is a particular combination of the four fugacities $x,y,a,b(,c)$.
We set three fugacities to $1$ apart from the one conjugate to $\epsilon-R_1$,
which we call $q$. More concretely, we first rewrite the measure in (\ref{superconformal})
using the BPS relation $\epsilon=2R_1+2R_2+j_1+j_2+j_3$:
\begin{equation}
x^{3\epsilon+j_1+j_2+j_3}y^{R_1-R_2}a^{j_1}b^{j_2}c^{j_3}=
x^{4\epsilon}(yx^{-2})^{R_1}(yx^2)^{-R_2}a^{j_1}b^{j_2}c^{j_3}\ .
\end{equation}
Then setting $a=b=c=x^2y=1$, and defining $q\equiv x^4$, the measure becomes
$q^{\epsilon-R_1}$. Note that, as
the half-BPS energy bound in 6d is $\epsilon\geq 2|R_1|$, $\epsilon-R_1$ is positive
definite for all states. Rewriting the unrefined letter index (\ref{abelian-index})
using $q$ only, one obtains
\begin{equation}
z=\frac{q+q^2-3q^2+q^3}{(1-q)^3}=\frac{q}{1-q}\ ,\ \
Z=q^{\epsilon_0}PE\left[\frac{q}{1-q}\right]=q^{\epsilon_0}\prod_{n=1}^\infty
\frac{1}{1-q^n}\ .
\end{equation}
Although the second limit is very different from the first limit above for the half-BPS
states, it has a special property associated with the same $16$ SUSY. Namely,
$\epsilon-R_1$ commutes with exactly the same $16$ supercharges preserved by
the half-BPS states considered in the last paragraph. Superconformal indices can be
defined by choosing any $2$ mutually conjugate supercharges $Q$, $S$ among them.
One would obtain the same result no matter which pair one chooses.
The R-symmetry twist we introduced above for the Abelian theory provides the
chemical potential to $R_1$ as well, so that we weight the states by
$e^{-\beta(\epsilon-R_1)}$. The $16$ SUSY of the 5d theory refers to those in 6d
which commute with $\epsilon-R_1$. Thus, we expect the partition function of
this 5d theory with $16$ SUSY to be the second limit of the superconformal index,
with identification $q=e^{-\beta}$ of the fugacity and the gauge coupling.
During detailed calculations in later sections, we shall
use localization by picking any of the $16$ SUSY of the theory. The result is
guaranteed to be the same from 5d perspective as the path integral preserves all
$16$ SUSY, among which we only use a pair. This is consistent with our observation
in the previous paragraph from the 6d perspective, that same result will be obtained
no matter what supercharges one chooses to define the index.
An important property of the second limit is that the information on the vacuum
Casimir energy is not lost. So if one can compute the partition function for
non-Abelian theories, the $N^3$ scaling is supposed to be calculable
in a microscopic way.
The information on the above two limiting cases of the superconformal index is
all encoded in the following simplified index. Namely, we consider an unrefined
index which contains only two chemical potentials conjugate to $\epsilon-R_1$,
$\epsilon-R_2$. In (\ref{abelian-index}), this amounts to turning off $a,b,c$ and
keeping $x$, $y$ only. We weight the states as $q_1^{\epsilon-R_1}q_2^{\epsilon-R_2}$.
The resulting letter index for the Abelian theory becomes
\begin{equation}
z=\frac{q_1q_2^2+q_1^2q_2-3q_1^2q_2^2+q_1^3q_2^3}{(1-q_1q_2)^3}\ .
\end{equation}
The first term in the numerator comes from a complex scalar which defines the half-BPS
states. The scaling limit $q_1\rightarrow 0$, $q_2\rightarrow\infty$ which keeps
$q\equiv q_1q_2^2$ finite takes the above letter index to $q$, which yields the desired
half-BPS partition function for the Abelian theory. In the 5d reduction, the parameters $\beta,\Delta$ are related to
$q_1$, $q_2$ by
\begin{equation}\label{two-chemical}
q_1=e^{-\beta\Delta}\ ,\ \ q_2=e^{-\beta(1-\Delta)}\ ,\ \
q\equiv q_1q_2^2=e^{-\beta(2-\Delta)}\ .
\end{equation}
The half-BPS limit amounts to taking
\begin{equation}
\beta\rightarrow\infty\ ,\ \ \Delta\rightarrow 2\ ,\ \ \beta(2-\Delta)={\rm fixed}\ .
\end{equation}
In section 3.2, we shall explain the structure of the $S^5$ partition function with two parameters
$\beta,\Delta$, which is supposed to capture the 6d index
${\rm Tr}[(-1)^Fq_1^{\epsilon-R_1}q_2^{\epsilon-R_2}]$.
With more twists with the global symmetries of the theory, including R-symmetries
above as well as spatial rotations, it will be possible to obtain a 5d action
which preserves less supersymmetries, and presumably on a squashed $S^5$.
Then one can reduce the Abelian theory along the circle to obtain a 5d theory, and
calculate the partition function after a non-Abelian generalization which can be used
to study the general superconformal index \cite{Kinney:2005ej,Bhattacharya:2008zy} of
the 6d $(2,0)$ theory. This problem is studied in our later work \cite{Kim:2012qf}.
\subsection{Non-Abelian theories}
We generalize the above Abelian 5d theory on the 5-sphere, with
$SO(3)\times SO(2)$ subgroup of $SO(5)$ R-symmetry preserved by the curvature
coupling, to the non-Abelian gauge groups. We find that the action is
\begin{eqnarray}\label{action}
S&=&\frac{1}{g_{YM}^2}\int d^5x\sqrt{g}\
{\rm tr}\left[\frac{1}{4}F_{\mu\nu}F^{\mu\nu}+\frac{1}{2}D_\mu\phi^I D^\mu\phi^I
+\frac{i}{2}\lambda^\dag\gamma^\mu D_\mu\lambda-\frac{1}{4}[\phi^I,\phi^J]^2
-\frac{i}{2}\lambda^\dag\hat\gamma^I[\lambda,\phi^I]\right.\nonumber\\
&&\hspace{2.7cm}\left.+\frac{4}{2r^2}(\phi^a)^2+\frac{3}{2r^2}(\phi^i)^2
-\frac{i}{4r}\lambda^\dag\hat\gamma^{45}\lambda-\frac{1}{3r}\epsilon_{abc}\phi^a
[\phi^b,\phi^c]\right]\ ,
\end{eqnarray}
where $I,J=1,2,3,4,5$, $a\!=\!1,2,3$, $i\!=\!4,5$ are the vector indices of $SO(5)$
R-symmetry. $\gamma^\mu$ and $\hat\gamma^I$ are $4\times 4$
gamma matrices for the spatial/internal $SO(5)$, respectively.
This action is invariant under the following $16$ supersymmetries:
\begin{eqnarray}\label{SUSY}
-i\delta A_\mu&=&\frac{i}{2}\lambda^\dag\gamma_\mu\epsilon-\frac{i}{2}\epsilon^\dag
\gamma_\mu\lambda\\
-i\delta\phi^I&=&-\frac{1}{2}\lambda^\dag\hat\gamma^I\epsilon+\frac{1}{2}\epsilon^\dag
\hat\gamma^I\lambda\nonumber\\
-i\delta\lambda&=&\frac{1}{2}F_{\mu\nu}\gamma^{\mu\nu}\epsilon+iD_\mu\phi^I\gamma^\mu
\hat\gamma^I\epsilon-\frac{i}{2}[\phi^I,\phi^J]\hat\gamma^{IJ}\epsilon
+\frac{2i}{r}\phi^a\hat\gamma^{a45}\epsilon+\frac{i}{r}\phi^i\hat\gamma^{i}\hat\gamma^{45}
\epsilon\nonumber\\
-i\delta\lambda^\dag&=&-\frac{1}{2}\epsilon^\dag\gamma^{\mu\nu}F_{\mu\nu}+
i\epsilon^\dag\hat\gamma^I\gamma^\mu D_\mu\phi^I-\frac{2i}{r}\epsilon^\dag\hat\gamma^{45a}\phi^a-\frac{i}{r}\epsilon^\dag
\hat\gamma^{45}\hat\gamma^i\phi^i+\frac{i}{2}\epsilon^\dag\hat\gamma^{IJ}[\phi^I,\phi^J]
\nonumber
\end{eqnarray}
where $\epsilon$ satisfies
\begin{equation}
\nabla_\mu\epsilon=\frac{1}{2r}\gamma_\mu\hat\gamma^{45}\epsilon\ ,\ \
\nabla_\mu\epsilon^\dag=-\frac{1}{2r}\epsilon^\dag\gamma_\mu\hat\gamma^{45}
\end{equation}
on $S^5$. As we already explained with the Abelian theories, we take $\epsilon_+$ with
$i\hat\gamma^{45}=+1$ eigenvalues, which is related to $\epsilon_-$ with $-1$
eigenvalue by a symplectic charge conjugation.
We explain the reality property of the action and SUSY transformation in some detail.
Imposing the symplectic Majorana conditions for all matter and Killing spinors, the
action (\ref{action}) is real apart from the last term which is cubic in the scalars.
Also, we note that the SUSY transformations between scalars-fermions are all real,
while those between vector-fermions are all imaginary, i.e. violating the reality condition.
The factor $-i$ we inserted on the left hand sides of (\ref{SUSY}) guarantees the
above property.\footnote{Compared to the 5d maximal SYM action on the flat Euclidean space,
perhaps this $-i$ factor is unconventional. In the last case, the reality condition is
often ignored as we are in a Euclidean space.}
So in the path integral with this action, the $16$ SUSY transformations should be
regarded as symmetry transformations associated with changes of some integration
contours. The localization method that we shall use later in this paper applies with
such a complexification.
Technically, we started with the Abelian theory on $S^5$ obtained by a
Scherk-Schwarz reduction from 6d, and then added non-Abelian terms to SUSY and action,
trying to secure $16$ SUSY. We think the complex transformation and action are compulsory
consequences of this analysis, as we also tried but failed to find other real versions.
At least one can motivate why gauge fields-fermion part of the transformation could be
imaginary from the Abelian theory (in which case the action is actually real).
Consider some part of $16$ SUSY, e.g. $8$ SUSY that we consider in the later part
of this section. This choice of $8$ SUSY provides a notion of vector and hypermultiplets.
The supersymmetric reduction of the free hypermultiplet part is quite clear, and we
find no reason to ruin the reality of the SUSY transformation in this part. However,
the gauge field/fermion part seems somewhat subtle. In the Lorentzian theory on
$S^5\times\mathbb{R}$, the self-dual 3-form condition $H_{\mu\nu\rho}=\frac{1}{2}\epsilon_{\mu\nu\rho\alpha\beta}H^{\alpha\beta 0}$ can be
solved by naturally taking $F_{\mu\nu}=H_{\mu\nu 0}$ to be independent momentum-like fields,
subject to 6d Bianchi identity for $H_{MNP}$. In the Euclidean theory on
$S^5\times\mathbb{R}$,
covariant self-dual condition cannot be imposed. Still we want to secure the number
of degrees of freedom as this will be natural for getting the correct physics.
If we stick to the definition of $F_{\mu\nu}$ as $H_{\mu\nu 0}$, one would have to
continue $F_{\mu\nu}$ to $H_{\mu\nu 6}=-iH_{\mu\nu 0}=-iF_{\mu\nu}$ along
$\tau=it$. This extra factor of $i$ would make the vector-fermion
SUSY transformation to be imaginary. Combined with the formal SUSY checks that we did,
which independently yielded imaginary transformations, we feel that (\ref{SUSY}) is
somewhat inevitable.\footnote{However, one could have imposed different reality conditions
on various fields. For instance, the choice of \cite{Hosomichi:2012ek} is
different from ours in many places. Although not all the prescriptions in
\cite{Hosomichi:2012ek} are well motivated to us, by suitable analytic continuations
or complexifications we can make half of our SUSY to fit into theirs.}
One can check that the supersymmetry algebra is $SU(4|2)$. Firstly, one can obtain
the following commutation relations
\begin{eqnarray}\label{superalgebra}
[\delta_1,\delta_2]\phi^{a} &=& 2i\epsilon_1^\dagger\gamma^\mu\epsilon_2 D_\mu\phi^a+2i\epsilon_1^\dagger\hat\gamma^J\epsilon_2[\phi^J,\phi^a]
+\frac{4i}{r}\epsilon_1^\dagger\hat\gamma^{ab}\hat\gamma^{45}\epsilon_2\phi^b \nonumber\\
&=& L_v \phi^a +i[\Lambda,\phi^a]+\frac{2i}{r}\epsilon^{abc}\epsilon^\dagger_1
\hat\gamma^b\epsilon_2 \phi^c \,, \\
\left. \right.[\delta_1,\delta_2]\phi^{i} &=&2i\epsilon_1^\dagger\gamma^\mu\epsilon_2 D_\mu\phi^i+2i\epsilon_1^\dagger\hat\gamma^J\epsilon_2[\phi^J,\phi^i]
-\frac{2i}{r}\epsilon_1^\dagger\epsilon_2\epsilon^{ij}\phi^j \nonumber \\
&=& L_v \phi^i +i[\Lambda,\phi^i]+\frac{i}{r}\epsilon_1^\dagger\epsilon_2
\epsilon^{ij}\phi^j\,, \nonumber\\
\left. \right. [\delta_1,\delta_2]A_\mu &=& 2i\epsilon_1^\dagger\gamma^\nu\epsilon_2F_{\nu\mu} +2\epsilon_1^\dagger\hat\gamma^I\epsilon_2D_\mu\phi^I-\frac{2}{r}\epsilon^{ij}
\epsilon_1^\dagger\gamma_\mu\hat\gamma^i\epsilon_2\phi^j \nonumber\\
&=& L_v A_\mu +D_\mu\Lambda\nonumber\\
\left.\right.[\delta_1,\delta_2]\lambda&=&L_v\lambda+i[\Lambda,\lambda]
+\frac{1}{4}\Theta^{\mu\nu}\gamma_{\mu\nu}\lambda-i\epsilon_1^\dag\epsilon_2
\hat\gamma^{45}\lambda-2i\epsilon_1^\dag\hat\gamma^a\epsilon_2\hat\gamma^{a45}\lambda
+({\rm eqn\ of\ motion})\nonumber
\end{eqnarray}
where
\begin{eqnarray}
&&v^\mu = 2i\epsilon^\dagger_1\gamma^\mu\epsilon_2 \ ,\ \
\Lambda =-2i \epsilon^\dagger_1\gamma^\mu\epsilon_2 A_\mu + 2\epsilon^\dagger_1\hat\gamma^I\epsilon_2\phi^I \,, \nonumber\\
&&L_v\phi^i= v^\mu\partial_\mu \phi^i \ ,\ \ L_v\phi^a=v^\mu\partial_\mu \phi^a \,,
L_v A_\mu = v^\nu\partial_\nu A_\mu + \partial_\mu v^\nu A_\nu\ ,\nonumber\\
&&\Theta^{\mu\nu}=\nabla^{[\mu}\xi^{\nu]}+\xi^\lambda\omega_\lambda^{\ \mu\nu}\ .
\end{eqnarray}
In 6d $SU(4|2)$, the bosonic
subgroup is $SU(4)\times SU(2)\times U(1)$, where the $U(1)$ part is $\epsilon-R_1$.
By dimensional reduction to $S^5$, one is only left
with $-R_1$ which appears on the right hand side of (\ref{superalgebra}) as rotations
by $\epsilon^{ij}\phi^j$. Also, using the following Fierz identities
\begin{eqnarray}
\hspace*{-1cm}(\epsilon_1^\dag\gamma_\nu\epsilon_2)
(\epsilon_3^\dag\gamma^{\mu\nu}\hat\gamma^{45}\epsilon_4)
\!\!&\!=\!&\!\!-\frac{1}{4}(\epsilon_1^\dag\epsilon_4)
(\epsilon_3^\dag\gamma^{\mu\nu}\gamma_\nu\hat\gamma^{45}\epsilon_2)
-\frac{1}{4}(\epsilon_1^\dag\gamma^\alpha\epsilon_4)
(\epsilon_3^\dag\gamma^{\mu\nu}\gamma_\alpha\gamma_\nu\hat\gamma^{45}\epsilon_2)+\frac{1}{8}
(\epsilon_1^\dag\gamma^{\alpha\beta}\epsilon_4)(\epsilon_3^\dag\gamma^{\mu\nu}
\gamma_{\alpha\beta}\gamma_\nu\hat\gamma^{45}\epsilon_2)\nonumber\\
\hspace*{-1cm}(\epsilon_1^\dag\gamma^{\mu\nu}\hat\gamma^{45}\epsilon_2)
(\epsilon_3^\dag\gamma_\nu\epsilon_4)\!\!&\!=\!&\!\!-\frac{1}{4}(\epsilon_1^\dag\epsilon_4)
(\epsilon_3^\dag\gamma_\nu\gamma^{\mu\nu}\hat\gamma^{45}\epsilon_2)
-\frac{1}{4}(\epsilon_1^\dag\gamma^\alpha\epsilon_4)
(\epsilon_3^\dag\gamma_\nu\gamma_\alpha\gamma^{\mu\nu}\hat\gamma^{45}\epsilon_2)+\frac{1}{8}
(\epsilon_1^\dag\gamma^{\alpha\beta}\epsilon_4)(\epsilon_3^\dag\gamma_\nu
\gamma_{\alpha\beta}\gamma^{\mu\nu}\hat\gamma^{45}\epsilon_2)\nonumber
\end{eqnarray}
and taking all spinors to belong to $\epsilon_-$, one can check for
$v^\mu=2i\epsilon_1^\dag\gamma^\mu\epsilon_2$, $w^\mu=2i\epsilon_3^\dag\gamma^\mu\epsilon_4$
that
\begin{eqnarray}
[v,w]^\mu&=&\mathcal{L}_vw^\mu=-\frac{4}{r}(\epsilon_1^\dag\gamma_\nu\epsilon_2)
\left(\epsilon_3^\dag\gamma^{\mu\nu}\hat\gamma^{45}\epsilon_4\right)
+\frac{4}{r}\left(\epsilon_1^\dag\gamma^{\mu\nu}\hat\gamma^{45}\epsilon_2\right)
(\epsilon_3^\dag\gamma_\nu\epsilon_4)\nonumber\\
&=&\frac{8}{r}(\epsilon_1^\dag\epsilon_4)(\epsilon_3^\dag\gamma^\mu\hat\gamma^{45}\epsilon_2)
-\frac{8}{r}(\epsilon_1^\dag\gamma^\mu\epsilon_4)(\epsilon_3^\dag\hat\gamma^{45}\epsilon_2)\ .
\end{eqnarray}
Normalizing spinors as $\epsilon_\alpha^\dag\epsilon_\beta=\delta_{\bar\alpha\beta}$
where $\alpha,\beta=1,2,3,4$ are for ${\bf 4}$ of $SO(6)$, one obtains
\begin{equation}
[v_{\bar{\alpha}\beta},v_{\bar{\gamma}\delta}]^\mu=-\frac{4}{r}
\left(\delta_{\beta\bar{\gamma}}v_{\bar{\alpha}\delta}^\mu
-\delta_{\bar{\alpha}\delta}v_{\bar{\gamma}\beta}^\mu\right)\ ,
\end{equation}
which forms the desired $SU(4)\sim SO(6)$ algebra. The $SU(2)$ part of the algebra
is also easily visible as rotations on $\phi^a$. So we interpret it as the 5d reduction
of $SU(4|2)\subset OSp(8|4)$ superconformal group for the 6d $(2,0)$ theory, commuting
with $\epsilon-R_1$.
By taking all $\epsilon_i$'s to be $\epsilon_-$ above, we obtained the
anti-commutation relations of the type $\{Q,S\}$. The anti-commutation relations of
the form $\{Q,Q\}$ or their conjugates $\{S,S\}$ can be studied by taking $\epsilon_1$
to belong to $\epsilon_-$ and $\epsilon_2$ to belong to $\epsilon_+$ in
(\ref{superalgebra}). Then, one finds
\begin{equation}
\epsilon_1^\dag\gamma^\mu\epsilon_2=0\ ,\ \
\epsilon_1^\dag\hat\gamma^{a}\epsilon_2=0\ ,\ \ \epsilon_1^\dag\epsilon_2=0
\end{equation}
by studying $i\hat\gamma^{45}=(i\hat\gamma^{45})^\dag$ eigenvalues. Thus, the
bosonic elements of the superalgebra do not extend beyond $SU(4|2)$. For instance,
the analysis for 5d SCFT with $F(4)$ symmetry would have yielded $\{Q,Q\}\sim P$,
$\{S,S\}\sim K$ as in \cite{Kim:2012gu}, but they naturally do not appear in our case.
In the next section, we shall use the localization method to perform the path integral
for the partition function. To this end, we attempt to make some part of the supersymmetry
algebra to hold off-shell. The most important requirement is that the single supercharge,
or a pair of conjugate supercharges, which we choose to perform localization calculation
takes the required algebra (nilpotency) off-shell. We take $8$ of our $16$ SUSY, and
decompose the field into the vector and hypermultiplets. The vector multiplet part of the
algebra is made off-shell for all $8$ SUSY by introducing $3$ auxiliary fields, while hypermultiplet part of the algebra is made off-shell only for a subset which includes
a pair of Hermitian SUSY generators. This strategy is all spelled out in
\cite{Hosomichi:2012ek}.
With the internal gamma matrices chosen as (\ref{internal-gamma}), the $8$ SUSY
are chosen by taking $\hat\gamma^3\epsilon=-\epsilon$. The internal charge conjugation
matrix is taken to be $\hat{C}=\hat\gamma^{25}=i\sigma^2\otimes\sigma^3$.
One can write the $8$ SUSY and $16$ component fermion $\lambda$ as
\begin{equation}
\epsilon=\left(\begin{array}{c}\epsilon^1\\ \epsilon^2\end{array}\right)\otimes
\left(\begin{array}{c}1\\0\end{array}\right)\ ,\ \ \lambda=
\left(\begin{array}{c}\chi^1\\ \chi^2\end{array}\right)\otimes
\left(\begin{array}{c}1\\0\end{array}\right)+
\left(\begin{array}{c}\psi^1\\ \psi^2\end{array}\right)\otimes
\left(\begin{array}{c}0\\1\end{array}\right)\ .
\end{equation}
$\epsilon_A,\chi_A,\psi_A$ for $A=1,2$ can be regarded as $SU(2)$ spinors. This $SU(2)$
symmetry is broken in the action by curvature couplings, and only the Cartan generator
proportional to $\sigma^3$ is a symmetry. The $SO(5)$ origin of this $U(1)$ can be easily
traced by noticing $\hat\gamma^{12}=i\sigma^3\otimes{\bf 1}_2$,
$\hat\gamma^{45}=i\sigma^3\otimes\sigma^3$. The $U(1)$ acts on $\chi_A$ as a simultaneous
rotation on $12$ and $45$ planes, while on $\psi_A$ as opposite rotation on the two 2-planes.
For later use, we take a complex 4-component spinor $\psi$ on $S^5$ as $\psi\equiv\psi^2$.
The first component $\psi^1$ is related to $\psi$ by a symplectic-Majorana conjugation using
$SO(5)\times SU(2)$, inherited from our $SO(5)\times SO(5)$ symplectic-Majorana conjugation.
Let us also define the scalars as
\begin{equation}
\phi\equiv\phi^3\ ,\ \ q^1\equiv\frac{1}{\sqrt{2}}(\phi^4-i\phi^5)\ ,\ \
q^2\equiv\frac{1}{\sqrt{2}}(\phi^1+i\phi^2)\ .
\end{equation}
The real scalar $\phi$ participates in the vector multiplet, while $q^A$
belong to the adjoint hypermultiplet.
For the vector multiplet, we introduce three auxiliary fields $D^I$, whose
on-shell values become
\begin{equation}
D^I=-(\sigma^I)^A_{\ B}[q^B,\bar{q}_A]-\frac{i}{r}\delta^I_3\phi\ .
\end{equation}
The off-shell Lagrangian that we shall write in a moment is invariant under
\begin{eqnarray}
-i\delta A_\mu&=&i\chi^\dag\gamma_\mu\epsilon\\
-i\delta\phi&=&\chi^\dag\epsilon\nonumber\\
-i\delta\chi&=&\frac{1}{2}F_{\mu\nu}\gamma^{\mu\nu}\epsilon-iD_\mu\phi\gamma^\mu\epsilon
+\frac{1}{r}\phi\sigma^3\epsilon+iD^I\sigma^I\epsilon\nonumber\\
-i\delta\chi^\dag&=&-\frac{1}{2}F_{\mu\nu}\epsilon^\dag\gamma^{\mu\nu}
-i\epsilon^\dag\gamma^\mu D_\mu\phi-\frac{1}{r}\epsilon^\dag\sigma^3\phi
-i\epsilon^\dag\sigma^ID^I\nonumber\\
-i\delta D^I&=&D_\mu\chi^\dag\gamma^\mu\sigma^I\epsilon-[\phi,\chi^\dag]\sigma^I\epsilon
-\frac{i}{2r}\chi^\dag\sigma^I\sigma^3\epsilon\ .
\end{eqnarray}
The off-shell SUSY algebra including the $8$ Killing spinors is $SU(4|1)$, and is given by
\be
\left[\delta_1,\delta_2\right]\!A_\mu &=& \xi^\nu\partial_\nu A_\mu +\partial_\mu\xi^\nu A_\nu + D_\mu \Lambda , \nn \\
\left[\delta_1,\delta_2\right]\!\phi^3 &=& \xi^\mu\partial_\mu\phi^3 +i[\Lambda,\phi^3] + \rho\phi^3,\nn \\
\left[\delta_1,\delta_2\right]\!\chi &=& \xi^\mu \partial_\mu\chi +\frac{1}{4}\Theta_{\mu\nu}\gamma^{\mu\nu}\lambda+i[\Lambda,\chi] +\frac{3}{2}\rho\chi +\frac{3}{4}R^{IJ}\sigma^{IJ}\chi,\nn\\
\left[\delta_1,\delta_2\right]\!{\rm D}^I &=& \xi^\mu\partial_\mu {\rm D}^I + i[\Lambda,{\rm D}^I] +2\rho {\rm D}^I+3R^{IJ}{\rm D}^J\ ,
\ee
where
\be
&&\xi^\mu = 2i\bar\epsilon_1\gamma^\mu\epsilon_2\ ,\ \
\Lambda = -2i\bar\epsilon_1\gamma^\mu\epsilon_2A_\mu+2\bar\epsilon_1\epsilon_2\phi^3, \nn \\
&&\Theta^{\mu\nu} = D^{[\mu}\xi^{\nu]} +\xi^\lambda\omega_\lambda^{\mu\nu}\ ,\ \
\rho = \frac{2i}{5}D_\mu(\bar\epsilon_1\gamma^\mu\epsilon_2), \nn \\
&&R^{IJ} = \frac{2i}{5}(\bar\epsilon_1\gamma^\mu\sigma^{IJ}D_\mu\epsilon_2-D_\mu\bar\epsilon_1\gamma^\mu \sigma^{IJ}\epsilon_2)\ .
\ee
These results are all found in \cite{Hosomichi:2012ek}.
We also consider an off-shell generalization of the hypermultiplet algebra.
As the off-shell generalization of the whole $8$ SUSY algebra cannot be achieved
with a finite number of auxiliary fields, we follow the strategy of \cite{Hosomichi:2012ek}
and demand that we have a single off-shell nilpotent supercharge, with which
one can do localization calculations. In other words, we are interested in a SUSY which
satisfies $\delta^2=0$ off-shell (up to a bosonic symmetry generator) with a given
commuting spinor $\epsilon$ parameter.
With a bosonic $\epsilon$ chosen among the $8$ SUSY generators explained above,
we follow \cite{Hosomichi:2012ek} and consider another bosonic spinor parameter
$\hat\epsilon$ satisfying
\begin{equation}
\epsilon^\dag\epsilon=\hat\epsilon^\dag\hat\epsilon\ ,\ \
(\epsilon^A)^TC\hat\epsilon^{B^\prime}=0\ ,\ \ \epsilon^\dag\gamma^\mu\epsilon
+\hat\epsilon^\dag\gamma^\mu\hat\epsilon=0\ .
\end{equation}
One introduces two auxiliary complex fields $F^A$, having $0$ on-shell values,
and consider the following SUSY transformation with a commuting Killing spinor
(which reduces to our on-shell SUSY upon taking $F^A=0$):
\be\label{hyper-off-shell}
\delta q^A &=& \sqrt{2}i(\epsilon^\dagger)^A\psi \ , \quad \delta \bar{q}_A =-\sqrt{2}i\psi^\dagger\epsilon_A \nn \\
\delta \psi &=& \sqrt{2}\left[-D_\mu q_A\gamma^\mu \epsilon^A +[\phi^3,q_A]\epsilon^A- \frac{3i}{2r}q_A(\sigma^3)^A_{\ \ B}\epsilon^B-\frac{i}{2r}q_A\epsilon^A - iF_{A'}\hat{\epsilon}^{A'}\right] \nn \\
\delta \psi^\dagger &=& \sqrt{2}\left[\epsilon^\dagger_A\gamma^\mu D_\mu \bar{q}^A +\epsilon^\dagger_A[\bar{q}^A,\phi^3] -i \frac{3}{2r}\epsilon^\dagger_A(\sigma^3)^A_{\ \ B}\bar{q}^B-\frac{i}{2r}\epsilon^\dagger_A\bar{q}^A - i(\hat{\epsilon}^\dagger)_{A'}\bar{F}^{A'}\right] \nn \\
\delta F^{A'} &=& \sqrt{2}(\hat{\epsilon}^\dagger)^{A'}\left[-\gamma^\mu D_\mu\psi + \frac{i}{2r}\psi -[\phi^3,\psi]-\sqrt{2}[\chi_A, q^A]\right] \nn \\
\delta\bar{F}_{A'} &=& \sqrt{2}\left[-D_\mu\psi^\dagger\gamma^\mu -\frac{i}{2r}\psi^\dagger+[\psi^\dagger,\phi^3] -\sqrt{2}[\bar{q}_A,(\chi^\dagger)^A]\right]\hat\epsilon_{A'}\ .
\ee
This is a special case of \cite{Hosomichi:2012ek} which has $-\frac{1}{2r}q_A\epsilon^A$,
$\frac{1}{2r}\psi$ terms on the right hand sides with a choice of their mass parameters.
The SUSY algebra for a given commuting $\epsilon$ is
\be
\delta^2q^A &=&\xi^\mu\partial_\mu q^A + i [\Lambda, q^A] + \frac{3}{4} R^{IJ} (\sigma^{IJ} q )^A + \frac{1}{2r}q^A \nn \\
\delta^2\psi &=& \xi^\mu \partial_\mu \psi +\frac{1}{4}\Theta_{\mu\nu}\gamma^{\mu\nu}\psi+i\Lambda\psi + \frac{1}{2r}\psi \nn \\
\delta^2F^{A'} &=& \xi^\mu \partial_\mu F^{A'} + i[\Lambda,F^{A'}] +\frac{5}{4}\hat{R}^{IJ}(\hat\sigma^{IJ}F)^{A'} +\frac{1}{2r} F^{A'}\ ,
\ee
where
\be
&&\xi^\mu = -i\epsilon^\dagger\gamma^\mu\epsilon \ , \ \
\Lambda = i\epsilon^\dagger\gamma^\mu\epsilon A_\mu+\phi \ , \ \
\Theta^{\mu\nu} = D^{[\mu}\xi^{\nu]} +\xi^\lambda\omega_\lambda^{\mu\nu} \ , \nn \\
&&R^{IJ} =- \frac{2i}{5}\epsilon^\dagger\sigma^{IJ}\gamma^\mu D_\mu\epsilon \ , \ \
\hat{R}^{IJ}=\frac{2i}{5}\hat\epsilon^\dagger\hat\sigma^{IJ}\gamma^\mu D_\mu\hat\epsilon\ .
\ee
In the above off-shell formulation, the Lagrangian
invariant under the above $8$ SUSY transformations is given by
\begin{eqnarray}\label{off-shell-action}
\mathcal{L}&=&\frac{1}{g_{YM}^2}{\rm tr}\left[\frac{1}{4}F_{\mu\nu}F^{\mu\nu}
+\frac{1}{2}(D_\mu\phi)^2+\frac{i}{2}\chi^\dag\gamma^\mu D_\mu\chi
-\frac{1}{2}D^ID^I-\frac{i}{r}D^3\phi+\frac{5}{2r^2}\phi^2
-\frac{i}{2}\chi^\dag[\phi,\chi]+\frac{1}{4r}\chi^\dag\sigma^3\chi\right.\nonumber\\
&&+|D_\mu q^A|^2+i\psi^\dag\gamma^\mu D_\mu\psi+|[\phi,q^A]|^2
-D^I(\sigma^I)^A_{\ B}[q^B,\bar{q}_A]-F^{A^\prime}\bar{F}_{A^\prime}
-\frac{i}{r}\phi[q^A,\bar{q}_A]+\frac{3}{r^2}|q^1|^2+\frac{4}{r^2}|q^2|^2\nonumber\\
&&\left.+i\psi^\dag[\phi,\psi]
+\sqrt{2}i\psi^\dag[\chi_A,q^A]-\sqrt{2}i[\bar{q}_A,\chi^{\dag A}]\psi+\frac{1}{2r}\psi^\dag\psi\right]
\end{eqnarray}
The integration contours for $D^I$, ${\rm Re}(F^A)$, ${\rm Im}(F^A)$ are taken to be
on the imaginary axes.
We can generalize the theory preserving 8 SUSY with a continuous parameter $\Delta$,
whose Abelian version we introduced in section 2.1 (corresponding to a generalized
Scherk-Schwarz reduction). The generalized Lagrangian is
\be
\mathcal{L}_{YM} &=& \frac{1}{g_{YM}^2}{\rm tr}\Big[\frac{1}{4}F_{\mu\nu}F^{\mu\nu}+\frac{1}{2}(D_\mu\phi^3)^2 + |D_\mu q^A|^2
+\frac{5}{2r^2}(\phi^3)^2 +\frac{15}{4r^2}|q^A|^2 -\frac{1}{2}{\rm D}^I {\rm D}^I-\frac{i}{r}\phi^3 {\rm D}^3\nn \\
&&+\left([\bar{q}_A,\phi^3]+i\frac{1-2\Delta}{2r}\bar{q}_A\right)\left([\phi^3,q^A]+i\frac{1-2\Delta}{2r}q^A\right)-\bar{q}_A(\sigma^I)^A_{\ \ B}\left([{\rm D}^I,q^B]-\delta^I_3\frac{1-2\Delta}{2r^2}q^B\right) \nn \\
&&+\frac{i}{2}\chi^\dagger \gamma^\mu D_\mu\chi +i\psi^\dagger \gamma^\mu D_\mu \psi + \frac{1}{4r}\chi^\dagger\sigma^3\chi -\bar{F}_{A'}F^{A'}\nn \\
&&- \frac{i}{2}\chi^\dagger[\phi^3,\chi]+i\psi^\dagger\left([\phi^3,\psi]+i\frac{1-2\Delta}{2r}\psi\right) +\sqrt{2}i\psi^\dagger[\chi_A,q^A] -\sqrt{2}i[\bar{q}_A,\chi^\dagger]\psi\Big]\ .
\ee
When $\Delta =1$, it becomes our previous action with $16$ SUSY.
It is invariant under
\be\label{hyper-off-shell-deformed}
\delta q^A &=& \sqrt{2}i(\epsilon^\dagger)^A\psi \ , \quad \delta \bar{q}_A =-\sqrt{2}i\psi^\dagger\epsilon_A \nn \\
\delta \psi &=& \sqrt{2}\left[-D_\mu q_A\gamma^\mu \epsilon^A+[\phi^3,q_A]\epsilon^A- \frac{3i}{2r}q_A(\sigma^3)^A_{\ \ B}\epsilon^B +i\frac{1-2\Delta}{2r}q_A\epsilon^A- iF_{A'}\hat{\epsilon}^{A'}\right] \nn \\
\delta \psi^\dagger &=& \sqrt{2}\left[\epsilon^\dagger_A\gamma^\mu D_\mu \bar{q}^A +\epsilon^\dagger_A[\bar{q}^A,\phi^3] - \frac{3i}{2r}\epsilon^\dagger_A(\sigma^3)^A_{\ \ B}\bar{q}^B +i\frac{1-2\Delta}{2r}\epsilon^\dagger_A\bar{q}^A- i(\hat{\epsilon}^\dagger)_{A'}\bar{F}^{A'}\right] \nn \\
\delta F^{A'} &=& \sqrt{2}(\hat{\epsilon}^\dagger)^{A'}\left[-\gamma^\mu D_\mu\psi -i\frac{1-2\Delta}{2r}\psi -[\phi^3,\psi]+\sqrt{2}i[\chi_A, q^A]\right] \nn \\
\delta\bar{F}_{A'} &=& -\sqrt{2}\left[D_\mu\psi^\dagger\gamma^\mu-i\frac{1-2\Delta}{2r}\psi^\dagger-[\psi^\dagger,\phi^3] -\sqrt{2}i[\bar{q}_A,(\chi^\dagger)^A]\right]\hat\epsilon_{A'}
\ee
and same SUSY transformation on vector multiplet fields.
One can identify the fields and parameters in our theory and \cite{Hosomichi:2012ek} as
\begin{equation}
\phi^3 =- i\sigma_{HST} \ , \quad \chi = -i\lambda_{HST} \ ,
\quad i \sigma^I D^I = D_{HST} \ ,
q^A = q^A_{HST} \ , \quad \psi = \sqrt{2}\psi_{HST}\ .
\end{equation}
Our parameter $\Delta-\frac{1}{2}$ is proportional to their hypermultiplet mass
associated with
a global symmetry. The off-shell SUSY algebra for the vector multiplet is the same, while
the off-shell algebra for a given commuting Killing spinor for hypermultiplet becomes
\be
\delta^2q^A &=&\xi^\mu\partial_\mu q^A + i [\Lambda, q^A] + \frac{3}{4} R^{IJ}
(\sigma^{IJ} q )^A - \frac{1-2\Delta}{2r}q^A \nn \\
\delta^2\psi &=& \xi^\mu \partial_\mu \psi +\frac{1}{4}\Theta_{\mu\nu}\gamma^{\mu\nu}\psi+i\Lambda\psi - \frac{1-2\Delta}{2r}\psi \nn \\
\delta^2F^{A'} &=& \xi^\mu \partial_\mu F^{A'} + i[\Lambda,F^{A'}] +\frac{5}{4}\hat{R}^{IJ}(\hat\sigma^{IJ}F)^{A'} -\frac{1-2\Delta}{2r} F^{A'}\ .
\ee
In section 3.3, we shall use this theory to calculate the perturbative partition function,
which we suggest would be part of a more general superconformal index.
\section{5-sphere partition function as a 6d index}
In this section, we study the partition function of the maximal SYM on $S^5$
and the theory with $8$ SUSY that we considered in the previous section.
We first consider the theory with $16$ SUSY. We choose a commuting Killing spinor
$\epsilon$ to be a linear combination $\epsilon=\epsilon^++\epsilon^-$, where
$\epsilon^\pm$ satisfy the following projection conditions
\begin{equation}
\sigma^3\epsilon^\pm=\pm\epsilon^\pm\ ,\ \
\gamma^5\epsilon^\pm=\mp i\gamma^{12}\epsilon^\pm=\pm i\gamma^{34}\epsilon^\pm=\epsilon^\pm\ .
\end{equation}
The explicit expressions for $\epsilon^\pm$ are (see appendix A, $\eta_\pm$ there)
\begin{equation}
\epsilon^\pm=e^{\pm\frac{3i}{2}y}\epsilon^\pm_0\ ,
\end{equation}
where constant spinors $\epsilon^\pm_0$ are conjugate to each other as
$(\epsilon^+_0)^\ast=C\otimes(i\sigma^2)\epsilon^-_0$. $y$ is the angle coordinate
of the Hopf fiber of $S^5$, over a $\mathbb{CP}^2$ base. The following spinor bilinears
will be useful:
\begin{equation}
v^\mu=\epsilon^\dag\gamma^\mu\epsilon\ ,\ \ J_{\mu\nu}=\nabla_\mu v_\nu
=-2i\bar\epsilon^+\gamma_{\mu\nu}\epsilon^+\ (=e^1\wedge e^2-e^3\wedge e^4)\ .
\end{equation}
$J_{\mu\nu}$ is the Kahler 2-form of $\mathbb{CP}^2$, and $v^\mu$ is the translation
generator along the fiber $y$ direction. They satisfy $\nabla_\rho J_{\mu\nu}=2v_{[\mu}g_{\nu]\rho}$.
With this $\epsilon$, we can add to the Lagrangian any term $\mathcal{Q}V$ which is exact in the corresponding supercharge $\mathcal{Q}$, without changing the value of the final integral. This
property relies on the chosen $\mathcal{Q}$ being nilpotent, $\mathcal{Q}^2=0$.
Actually, since the chosen Killing spinor $\epsilon$ is real, it amounts to picking one Poincare
supercharge $Q$ with its conjugate conformal supercharge $S$, and taking a real linear combination
of the two. Thus one actually finds
\begin{equation}
\mathcal{Q}^2\sim\{Q,S\}=({\rm symmetry\ generator})\ ,
\end{equation}
where the right hand side comes from a suitable
combination of the bosonic generators appearing in the $\{Q,S\}$ part of the $SU(4|2)$
algebra. Thus, only when we choose $V$ in the $\mathcal{Q}$-exact deformation $\mathcal{Q}V$
to be neutral under the rotation of $\{Q,S\}$ (which we will do), one is guaranteed
not to change the partition function by deformation.
$\mathcal{Q}$-exact deformations that we introduce are
\begin{eqnarray}\label{exact-vector}
\delta\left((\delta\chi)^\dag\chi\right)&=&\frac{1}{2}F_{\mu\nu}F^{\mu\nu}
-\frac{1}{4}\epsilon^{\mu\nu\rho\sigma\tau}v_\mu F_{\nu\rho}F_{\sigma\tau}
+(D_\mu\phi)^2+\left(\frac{1}{r}\phi+iD^3\right)^2-(D^1)^2-(D^2)^2\nonumber\\
&&-i\chi^\dag\gamma^\mu D_\mu\chi-i[\phi,\chi^\dag]\chi+\frac{1}{r}\chi^\dag
\sigma^3\chi-\frac{1}{2r}\chi^\dag v_\mu\gamma^\mu\sigma^3\chi
-\frac{i}{4r}J_{\mu\nu}\chi^\dag\gamma^{\mu\nu}\chi
\end{eqnarray}
for the vector multiplet, and
\be\label{exact-hyper}
&&\frac{1}{2}\delta\Big((\delta\psi)^\dagger\psi+\psi^\dagger(\delta\psi^\dagger)^\dagger\Big)
\nonumber\\
&&= |D_\mu q^A|^2-\frac{i}{r}v^\mu\bar{q}\sigma^3 D_\mu q -\frac{i}{r}v^\mu \bar{q}D_\mu q
+\frac{1}{r^2}\bar{q}_1 q^1 +\frac{4}{r^2}\bar{q}_2 q^2 +|[\phi^3,q^A]|^2 - \bar{F}_{A'}F^{A'}\nonumber\\
&&\hspace{0.5cm}+i\psi^\dagger\gamma^\mu D_\mu \psi -\frac{1}{2r}v^\mu \psi^\dagger\gamma_\mu\psi-\frac{i}{4r}J^{\mu\nu}\psi^\dagger\gamma_{\mu\nu}\psi +
i\psi^\dagger[\phi^3,\psi]
\ee
for the hypermultiplet. Here, the commuting Killing spinors are normalized to satisfy
$\epsilon^\dag\epsilon=1$, and traces are assumed for every term.\footnote{We add
two conjugate terms to form $V$ in the hypermultiplet part (\ref{exact-hyper}),
as this simplifies the determinant calculation significantly.}
It is easy to see that the corresponding $V$'s that we introduced above all
commute with $\{Q,S\}$. As $V$ are chosen to take the form of $(\delta\Phi)^\dag\Phi$
for various fields $\Phi$,
the charge of $V$ under $\{Q,S\}$ is basically the inverse of the charge carried by
the chosen SUSY generator $\delta$. As this is a linear combination of $Q,S$, it suffices
to show that $Q,S$ are both neutral under the rotation of $\{Q,S\}$. This trivially follows
from the following Jacobi identities (with $\{Q,Q\}=\{S,S\}=0$)
\begin{equation}
[\{Q,S\},Q]=0\ ,\ \ [\{Q,S\},S]=0\ .
\end{equation}
Thus we are allowed to introduce the above $\mathcal{Q}$-exact deformations.
\subsection{Perturbative partition function and Casimir energies}
Turning on the above $\mathcal{Q}$-exact deformations and taking their
coefficients to be large, one is led to a Gaussian path integral around a set
of saddle points satisfying
\begin{equation}\label{saddle}
F_{\mu\nu}=\frac{1}{2}\sqrt{g}\epsilon_{\mu\nu\alpha\beta\gamma}
v^\alpha F^{\beta\gamma}\ ,\ \ D_\mu\phi=0\ ,\ \ D^3=\frac{i}{r}\phi\ ,\ \
D^1=D^2=0\ ,\ \ q_1=q_2=0\ ,\ \ F^{1^\prime}=F^{2^\prime}=0\ ,
\end{equation}
while taking all fermion fields to zero. These equations can be easily obtained
by studying the vanishing SUSY condition, or alternatively by taking the bosonic part of
the $\mathcal{Q}$-exact deformations (\ref{exact-vector}), (\ref{exact-hyper}) to be zero.
See also \cite{Kallen:2012cs,Hosomichi:2012ek} which study the same equations.
The first equation of (\ref{saddle}) is for the self-dual Yang-Mills instantons
on the $\mathbb{CP}^2$ base (in the convention that the Kahler 2-form of $\mathbb{CP}^2$
is anti-self-dual), while any component of the gauge field along the Hopf fiber
is demanded to be zero from $v^\mu F_{\mu\nu}=0$. The configurations solving this
equation are called `contact instantons' in some of the literature, and have recently
been studied on general contact
manifolds, including $S^5$ \cite{Harland:2011zs,Wolf:2012gz}. In particular,
\cite{Wolf:2012gz} explores the twistor construction of this equation, which
could probably be used to get a better understanding of its solutions.
If the topological quantum number for these instantons on $\mathbb{CP}^2$ is nonzero,
one would get various non-perturbative corrections to the partition function. We shall
study them in the next subsection, and focus on the perturbative part here.
With $F_{\mu\nu}=0$, one can take the gauge connection to zero on $S^5$.
The only nonzero fields at the saddle point are $D^3$
and $\phi$ satisfying $D^3=\frac{i}{r}\phi$, where $\phi$ is a constant
Hermitian matrix. The saddle point is thus parameterized by the Hermitian matrix $\phi$,
which we should exactly integrate over after all other Gaussian
fluctuations are integrated out. The integration over $\phi$ will come with various factors
of integrands. Part of them will come from the contributions from the determinants of
quadratic fluctuations, which we shall turn to in a while. There is also a factor of integrand
that one obtains by plugging in the saddle point values of the fields into the original action.
Plugging in nonzero $\phi$ and $D^3$ into (\ref{off-shell-action}), this becomes
\begin{equation}
e^{-S_0}\ ,\ \ S_0=\frac{1}{g_{YM}^2}\int d^5x\sqrt{g}\frac{4}{r^2}{\rm tr}\phi^2=
\frac{4\pi^3 r^3}{g_{YM}^2}{\rm tr}\phi^2=\frac{2{\rm tr}(\pi r\phi)^2}{\beta}
\equiv\frac{2\pi^2{\rm tr}\lambda^2}{\beta}\ ,
\end{equation}
where $\int d^5x\sqrt{g}=\pi^3r^5$ on a 5-sphere with radius $r$,
$\frac{4\pi^2}{g_{YM}^2}=\frac{1}{r_1}=\frac{2\pi}{r\beta}$ yields
$\frac{4\pi^3 r^3}{g_{YM}^2}=\frac{2\pi^2r^2}{\beta}$, and we defined
$\lambda\equiv r\phi_0$ at the last step. The natural justification of
the $g_{YM}$ vs. $\beta$ relation we use here is given in section 3.2.
From the vector multiplet bosons, one has to diagonalize the differential operator
appearing in the following quadratic fluctuations in the $\mathcal{Q}$-exact deformation
($\phi$ fluctuations simply decouple to yield a constant factor, which cancels out
with other constant factors):
\be\label{vector-eqn}
&&\frac{1}{2}F_{\mu\nu}F^{\mu\nu} -\frac{1}{4} \epsilon^{\mu\nu\lambda\rho\sigma}v_\mu F_{\nu\lambda}F_{\rho\sigma} \nn \\
&=& A^\mu\left(-D^2 \delta_\mu^\nu + D_\mu D^\nu + 4\delta_\mu^\nu -2(J_{\mu\lambda}v\cdot D + 2v_{[\mu}J_{\lambda]\rho}D^\rho) g^{\lambda\nu}\right)A_\nu\ .
\ee
Using the basis of the vector spherical harmonics introduced in appendix A to diagonalize
the differential operator, one obtains the following determinant:
\begin{eqnarray}
{\det}_{V,b}&=&\prod_{\alpha\in{\rm root}}
\prod_{k=0}^\infty\left(k+4+ir\alpha(\phi)\right)^{\frac{(k+1)(k+2)^2(k+3)}{12}}
(k+ir\alpha(\phi))^{\frac{(k+1)(k+2)^2(k+3)}{12}-2\times\frac{(k+1)(k+2)}{2}}\nonumber\\
&&\times\prod_{k=1}^\infty\prod_{m=-k+1}^k\left(k^2+4k-2m+9+r^2\alpha(\phi)^2
\right)^{\frac{(k+2)((k+2)^2-m^2)}{8}}\ .
\end{eqnarray}
See appendix A for the derivation.
From the vector multiplet fermions, one obtains
\begin{eqnarray}
\hspace*{-1cm}{\det}_{V,f}&=&\prod_{\alpha\in{\rm root}}
\prod_{k=0}^\infty(k+4+ir\alpha(\phi))^{\frac{(k+1)(k+2)^2(k+3)}{12}}
(k+ir\alpha(\phi))^{\frac{(k+1)(k+2)^2(k+3)}{12}-\frac{(k+1)(k+2)}{2}}
(k+3+ir\alpha(\phi))^{\frac{(k+1)(k+2)}{2}}\nonumber\\
&&\times\prod_{k=1}^\infty\prod_{m=-k+1}^k
\left(k^2+4k-2m+9+r^2\alpha(\phi)^2\right)^{\frac{(k+2)((k+2)^2-m^2)}{8}}\ .
\end{eqnarray}
Dividing the two contributions, one obtains
\begin{equation}
\frac{{\det}_{V,f}}{{\det}_{V,b}}=\prod_{\alpha\in{\rm root}}
\prod_{k=0}^\infty(k+3+ir\alpha(\phi))^{\frac{(k+1)(k+2)}{2}}
\prod_{k=1}^\infty(k+ir\alpha(\phi))^{\frac{(k+1)(k+2)}{2}}
=\prod_{\alpha\in{\rm root}}\prod_{k=1}^\infty(k+ir\alpha(\phi))^{k^2+2}\ .
\end{equation}
This agrees with the result found in \cite{Kallen:2012cs}.
From the hypermultiplet, one obtains from the two complex scalars $q_1$, $q_2$ the following:
\begin{equation}
{\rm det}_{H,b}=\prod_{\alpha\in{\rm root}}
\prod_{k=0}^\infty\frac{1}{\left((k+2)^2+r^2\alpha(\phi)^2\right)^{\frac{(k+1)(k+2)^2(k+3)}{12}}}
\prod_{m=-k}^k\frac{1}{\left(k^2+4k+1+2m+r^2\alpha(\phi)^2\right)^{\frac{(k+2)((k+2)^2-m^2)}{8}}}
\end{equation}
where $m=k,k-2,k-4,\cdots,-k$. From hypermultiplet fermions,
\begin{eqnarray}
{\rm det}_{H,f}&=&\prod_{\alpha\in{\rm root}}
\prod_{k=1}^\infty(k+2+ir\alpha(\phi))^{\frac{(k+1)(k+2)^2(k+3)}{6}
-\frac{(k+1)(k+2)}{2}}\\
&&\times\prod_{k=0}^\infty(k+1+ir\alpha(\phi))^{\frac{(k+1)(k+2)}{2}}\prod_{m=-k+1}^k
\left(k^2+4k+1+2m+r^2\alpha(\phi)^2\right)^{\frac{(k+2)((k+2)^2-m^2)}{8}}\ .\nonumber
\end{eqnarray}
The net hypermultiplet determinant is
\begin{equation}
\frac{{\rm det}_{H,f}}{{\rm det}_{H,b}}=\prod_{\alpha\in{\rm root}}
\prod_{k=1}^\infty\frac{1}{(k+ir\alpha(\phi))^{k^2}}\ .
\end{equation}
Again see appendix A for the derivation.
Combining the contributions from vector and hypermultiplets,
one obtains the following perturbative determinant
\begin{equation}\label{final-measure}
\prod_{\alpha\in{\rm root}}\prod_{k=1}^\infty(k+ir\alpha(\phi))^2
=\prod_{\alpha\in{\rm root}}\prod_{k=1}^\infty(k^2+r^2\alpha(\phi)^2)=
\prod_{\alpha\in{\rm root}}\frac{2\pi\sinh(\pi r\alpha(\phi))}{\pi r\alpha(\phi)}\ .
\end{equation}
Here, we used $\prod_{k=1}^\infty k^2\!=\!2\pi$ after zeta function regularization
\cite{Kallen:2012cs}. The integration over the Hermitian matrix can be replaced
by an integration over the eigenvalues with
the Vandermonde measure inserted, which cancels $\alpha(\phi)$ in the
denominator of (\ref{final-measure}). Combining it with the classical Gaussian
measure, and defining dimensionless variables $\lambda=r\phi$, one obtains
\begin{equation}
Z_{\rm pert}=\frac{1}{|W|}\int d\lambda\
e^{-\frac{2\pi^2{\rm tr}(\lambda^2)}{\beta}}
\prod_{\alpha\in{\rm root}}2\sinh(\pi\alpha(\lambda))\ ,
\end{equation}
where $W$ is the Weyl group. One thus finds that the perturbative part of
the partition function, with $16$ SUSY, takes the form of the pure Chern-Simons
partition function on $S^3$ \cite{Marino:2002fk}. See also
\cite{Kapustin:2009kz,Tierz:2002jj} for some later studies of the same expression.
For simplicity, let us first consider the case with $U(N)$ gauge group in detail.
Pure Chern-Simons partition function with $U(N)$ gauge group is
\cite{Marino:2002fk,Kapustin:2009kz}
\begin{eqnarray}\label{U(N)-CS}
Z_{CS}&=&\frac{1}{N!}\int\prod_id\lambda_ie^{-ik\pi\lambda_i^2}
\prod_{i\neq j}2\sinh\left(\pi\lambda_{ij}\right)\\
&=&\nonumber\frac{(-1)^{\frac{N(N-1)}{2}}e^{-\pi iN^2/4}e^{-\frac{\pi i}{6k}N(N^2-1)}}
{k^{N/2}}\prod_{m=1}^{N-1}\left[2\sin\frac{\pi m}{k}\right]^{N-m}\ .
\end{eqnarray}
Comparing with our partition function, one should replace
$-\frac{i\pi}{k}$ by $\frac{\beta}{2}$. Thus one finds
\begin{eqnarray}\label{perturbative}
Z_{\rm pert}&=&(-1)^{N(N-1)/2}\left(\frac{i\beta}{2\pi}\right)^{N/2}
e^{-\pi iN^2/4}e^{\frac{N(N^2-1)}{12}\beta}
\prod_{m=1}^{N-1}\left[i(e^{\frac{m\beta}{2}}-e^{-\frac{m\beta}{2}})\right]^{N-m}\\
&=&(-1)^{N(N-1)/2}\left(\frac{i\beta}{2\pi}\right)^{N/2}
e^{-\pi iN^2/4}i^{N(N-1)/2}
e^{\frac{N(N^2-1)}{6}\beta}\prod_{m=1}^{N-1}(1-e^{-\beta m})^{N-m}\nonumber
\end{eqnarray}
where we used $\sum_mm(N-m)=\frac{N(N^2-1)}{6}$. The factors of $i$'s combine to
be $i^{N^2/2}e^{-\pi i N^2/4}=1$, and we shall not be careful about the
possible overall minus sign. Thus, regarding $q\equiv e^{-\beta}$ as the fugacity
of $\epsilon-R_1$ in the 6d theory, the perturbative contribution itself would have
taken the form of an index, supposing that we can somehow trade away the prefactor
$\left(\frac{\beta}{2\pi}\right)^{N/2}$. We shall see in the next subsection that,
combining this factor with the non-perturbative contribution will make the latter
to be an index. So we ignore this piece in this subsection and proceed.
More generally, for the gauge group $G$ with rank $r$, the 3-sphere Chern-Simons
partition function is given by \cite{Witten:1988hf,Marino:2011nm}
\begin{equation}\label{general-CS}
Z_{CS}=[\det(C)]^{1/2}\frac{i^{\frac{|G|}{2}-r}}{k^{r/2}}e^{-\frac{\pi i}{6k}c_2|G|}
\prod_{\alpha>0}2\sin\frac{\pi(\alpha\cdot\rho)}{k}\ ,
\end{equation}
where $|G|$ is the dimension of the gauge group, $c_2$ is the dual Coxeter number,
and $C$ is the inverse matrix of the inner product in the weight space (or Cartan
matrix for simply connected gauge group $G$).
$\rho$ is the Weyl vector which is the summation of all fundamental weights.
To get the correct information on the BPS state degeneracies, we will also have to
include the non-perturbative corrections, which we discuss in the next subsection.
However, from (\ref{perturbative}) one immediately observes a multiplicative factor
$e^{-\beta(\epsilon_0)_{\rm pert}}$ with
\begin{equation}
(\epsilon_0)_{\rm pert}=-\frac{N(N^2-1)}{6}
\end{equation}
for $U(N)$. For general gauge group, one finds from (\ref{general-CS}) that
$(\epsilon_0)_{\rm pert}$ becomes
\begin{equation}\label{pert-casimir}
(\epsilon_0)_{\rm pert}=-\frac{c_2|G|}{6}\ ,
\end{equation}
where $c_2$ is the dual Coxeter number and $|G|$ is the dimension of the
semi-simple part of the gauge group $G$.
See the next subsection for a nonperturbative correction to this result (subleading in
the large $N$ limit). This factor can naturally be interpreted as the `vacuum
energy' or the Casimir energy. However, one should be careful about the identification of
$\epsilon_0$ as the Casimir energy, as one has to pick a regularization when one
computes the vacuum energy. For instance, in free QFT, the Casimir energy is the summation of
all bosonic mode frequencies minus the fermionic mode frequencies, divided by $2$. In a radially
quantized CFT, one can employ the zeta function regularization or the energy
regularization as done, e.g. in \cite{Aharony:2003sx}. However, our result
above can be regarded as a `Casimir energy'
obtained by using $\epsilon-R_1$ as a regulator, as this is the only charge which can appear
in this index. In many theories, including 4d SCFTs admitting free theory limits, we illustrate
that different regularizations lead to different $\epsilon_0$. However, we observe that the
index Casimir energy contains useful information on the degrees of freedom of the theory.
In particular, in all 4d SCFT examples that we study in appendix B, we find that the index
Casimir energy is always proportional to the Casimir energy by a universal coefficient, and
is also a particular linear combination of the $a$ and $c$ central charge of the CFT.
Thus, we think our index Casimir energy could also be an interesting measure of the
degrees of freedom.
Coming back to our case, the coefficient in front of $c_2|G|$ has no reason to agree
with the true Casimir energy, due to the usage of an index version of regularization
and renormalization. Indeed, the calculation of the large $N$ Casimir energy of
$AdS_7\times S^4$ from gravity yields \cite{Awad:2000aj}
\begin{equation}
\epsilon_0=-\frac{5N^3}{24\ell}\ ,
\end{equation}
where $\ell$ is the $AdS_7$ radius. The coefficients $-\frac{1}{6}$ and $-\frac{5}{24}$ in
front of $N^3$ are indeed different. However, our $\epsilon_0$ robustly reproduces the expected $N^3$ behavior in the large $N$ limit, which we regard as significant microscopic
evidence supporting that $N$ M5-branes carry $N^3$ degrees of freedom. It should be interesting to study the gravity dual of (\ref{pert-casimir}). It is also curious
that the finite rank index Casimir energy from the perturbative part is proportional to
$c_2|G|$, which is the anomaly coefficient of the $ADE$ $(2,0)$ theory in 6d
\cite{Harvey:1998bx}. See, however, section 3.3 for a subleading correction that is
contained in a non-perturbative correction that we propose.
It should be very desirable to pursue the virtue of the index Casimir energy that we
have studied here (and in appendix B), and try to relate it to other measures of degrees
of freedom such as central charges, as we illustrate in appendix B with concrete examples
in 4d.
To better understand the perturbative expansion structure of $Z_{\rm pert}$,
we expand it in the large $N$ limit with small 't Hooft coupling, $\beta\rightarrow 0$,
$N\rightarrow\infty$, $N\beta={\rm fixed}\ll 1$. The perturbative `free energy'
$F_{\rm pert}=-\log Z_{\rm pert}$ is expanded as
\begin{eqnarray}\label{weak-coupling}
F_{\rm pert}&=&-\frac{N}{2}\log\frac{\beta}{2\pi}-\frac{\beta N(N^2-1)}{6}
-\sum_{n=1}^N(N-n)\log(1-e^{-n\beta})\nonumber\\
&\rightarrow&-\frac{N^2}{2}\log(N\beta)+\frac{3N^2}{4}
+N^2\sum_{n=1}^\infty a_n(N\beta)^n
\end{eqnarray}
with some $\mathcal{O}(1)$ coefficients $a_n$, where we used
\begin{eqnarray}
\sum_{n=1}^N n\log n&=&\frac{N^2}{2}\log N-\frac{N^2}{4}+\frac{N}{2}\log N
+\frac{1}{12}\log N+\mathcal{O}(1)\nonumber\\
\log N!&=&N\log N-N+\frac{1}{2}\log(2\pi N)+\mathcal{O}(N^{-1})
\end{eqnarray}
to obtain the first two leading terms in $N\beta$.
Here, at each order in $N\beta$, we only showed
the leading terms in $N$. Especially, the last infinite sum is acquiring contributions
from the planar diagrams. Naturally, the leading term in the weak coupling expansion
scales like $N^2$. It is also of some interest to study a sub-leading term
at the 2-loop ($\sim N^3\beta$) order, to study the 5d aspect of the 6d Casimir
energy that we obtained above. From the exact expression given on the first
line of (\ref{weak-coupling}), this order term comes from two sources. It first comes
from the second term $\beta(\epsilon_0)_{\rm pert}=-\frac{\beta N(N^2-1)}{6}$.
Also, the last summation which takes the form
$-\sum_n d_n\log(1-e^{-\beta E_n})$ yields a term at the same order,
$\frac{\beta}{2}\sum_nd_n E_n$ with $d_n=N-n$ and $E_n=n$.
Adding them, one obtains the following net (finite $N$) 2-loop contribution
\begin{equation}\label{2-loop-combination}
-\frac{\beta N(N^2-1)}{6}+\frac{\beta}{2}\sum_{n=1}^Nn(N-n)
=-\frac{\beta N(N^2-1)}{12}\ .
\end{equation}
So in the weak coupling regime, the information on the Casimir energy
$(\epsilon_0)_{\rm pert}$ in $F_{\rm pert}$ totally goes to the 2-loop order,
but also became ambiguous at this order by combining with an extra
contribution from $\beta/2\sum_n d_n E_n$.
We also work out a strong coupling large $N$ limit of $F_{\rm pert}$,
keeping $\beta$ finite ($N\beta\rightarrow\infty$). It turns out that
the leading behavior is the same as the 't Hooft large $N$ limit with
$\lambda={\rm fixed}\gg 1$, although the sub-leading terms are differently
organized in the two limits. The former limit is perhaps
more interesting, as this regime admits a dual gravity description in a
Euclidean $AdS_7$ which is supersymmetrically compactified along the time
direction with finite radius. On the first line of (\ref{weak-coupling}),
the second term is dominant in this strong coupling large $N$ limit:
\begin{equation}\label{strong}
F_{\rm pert}\sim-\frac{\beta N^3}{6}\ .
\end{equation}
So it acquires contribution only from the large $N$ Casimir energy.
Even with the instanton correction provided in the next subsection (proved in
\cite{Kim:2012qf}), this is the dominant term in the full free energy.
\begin{figure}[t!]
\begin{center}
\includegraphics[width=12cm]{2-loop.eps}
\caption{2-loop diagrams and the large $N$ double-line diagrams with $N^3$ scalings
for $SU(N)$}\label{2-loop}
\end{center}
\end{figure}
With the above understandings, it is easy to trace how the $N^3$ scaling, or more precisely
the $c_2|G|$ factor, appears in the Casimir energy, from the viewpoint of perturbative QFT.
$\beta\epsilon_0$ appears in $F_{\rm pert}$ at the sub-leading 2-loop order $\beta\sim
g_{YM}^2$ at weak coupling. Considering possible 2-loop vacuum bubbles such as those shown
as the Feynman diagrams of Fig \ref{2-loop}, it is clear that the group theoretic factors
are always $f^{abc}f^{abc}=c_2|G|$. Also, in the large $N$ double line notation for $U(N)$,
the appearance of 3 single loops naturally yields the $N^3$ scaling. Strictly speaking,
this argument does not say that $(\epsilon_0)_{\rm pert}$ itself shows the
$N^3$ behavior, but just that the combination $(\epsilon_0)_{\rm pert}+\frac{1}{2}\sum_nd_nE_n$
in (\ref{2-loop-combination}) does. But also with our exact result (\ref{perturbative}),
it still looks like a heuristic 5d insight of the appearance of $N^3$ in
$\epsilon_0$.\footnote{This was our original motivation that the $N^3$ scaling
in $\epsilon_0$ could appear from 5d gauge theories.}
Actually, such a group theoretic argument at $\mathcal{O}(g_{YM}^2)$ applies to
any quantum field theories with adjoint fields, in any dimension. For instance,
this is basically the reason why $N(N^2-1)$ or $c_2|G|$ appears in the pure Chern-Simons
partition functions (\ref{U(N)-CS}), (\ref{general-CS}). However, for generic
adjoint QFT's, this is no more than the standard 't Hooft planar contribution at
a particular sub-leading order, or a group theory of quadratic Casimir $f^{abc}f^{abc}$
at finite $N$. It is only because we have a higher dimensional
interpretation (with $g_{YM}^2$ being related to the inverse temperature or the
6th direction's radius in our case) that we can take this $c_2|G|$ or $N^3$
scaling as the physics of 6d $(2,0)$ theory. Also, for generic adjoint QFT's, there
is no guarantee that the strong coupling large $N$ limit would be anything like
(\ref{strong}).\footnote{For some special QFT's, like
pure Chern-Simons theory on $S^3$ whose partition function takes the same form as
our $Z_{\rm pert}$ with $\beta\sim i/k$, one might be able to say more on this
term which scales like $N^3$ (still subleading at weak 't Hooft coupling).
We are not sure if this has any meaning at all, perhaps in a different
physical context.}
We also note that, from the viewpoint of our 5-sphere partition function, it is not
clear at this stage whether $ADE$ gauge theories have any special status to have 6d UV
fixed points, as many arguments go similarly for other gauge groups $BCFG$. For instance,
the index nature of the Chern-Simons index could appear from (\ref{general-CS}), from the
expansion of the sine factors. Just like the $U(N)$ case that we explained, there
is $\beta^{r/2}$ prefactor and possibly some non-integral constant factor which will
obstruct $Z_{\rm pert}$ from being an index. Like the $U(N)$ case, all such factors
should combine with the non-perturbative part to be an index, for the $S^5$ partition
function to be interpretable as a 6d index. It could be that this non-perturbative
corrections, combined with the above prefactors, may violate the 6d index
structure for non-$ADE$ gauge groups. However, one should not confuse the 6d gauge
group and 5d gauge group which appears after compactification. For instance, suitably
twisted compactifications of 6d $ADE$ theories can yield all $BCFG$ gauge groups in 5d
\cite{Tachikawa:2011ch}. Our $BCFG$ partition functions could thus be `twisted indices,'
similar to \cite{Zwiebel:2011wa}.
\subsection{Nonperturbative corrections and $AdS_7$ gravity duals}
To motivate the studies on possible non-perturbative corrections to our partition
function, let us first go back to the 6d index explained in section 2.1, and
study it for the free Abelian 6d theory. This free theory index would be also important
in the $U(N)$ theories, as the overall $U(1)$ degrees are decoupled from the rest
which forms the interacting $A_n$ type $(2,0)$ theory.
In section 2.1, the `letter index' was shown to be $z=\frac{q}{1-q}$, and
the full index is given by
\begin{equation}\label{free-index}
Z_{U(1)}(q)=q^{\epsilon_0}\prod_{n=1}^\infty\frac{1}{1-q^n}\ .
\end{equation}
Here $\epsilon_0$ is the index Casimir energy contribution from the $U(1)$ degrees.
This zero point energy is given by $\epsilon_0=\frac{1}{2}{\rm tr}[(-1)^F(\epsilon-R_1)]$.
This contribution can be calculated from the letter index $z(q)=\frac{q}{1-q}$, as we
review in appendix B (which is explained in detail in \cite{Kim:2009wb}).
The result is
\begin{equation}
\epsilon_0=\frac{1}{2}\lim_{q\rightarrow 1^-}q\frac{d}{dq}z(q)=
\frac{1}{2\beta^2}-\frac{1}{24}\ ,
\end{equation}
where $q=e^{-\beta}$. After renormalization of the first divergent factor, one
obtains $\epsilon_0=-\frac{1}{24}$. This is basically the same as the zeta function
regularization, as the value of $\epsilon-R_1$ from the degrees in the letter index $z$
is $1,2,3,\cdots$. The zeta function regularization yields
$\frac{1}{2}\sum_{n=1}^\infty n=-\frac{1}{24}$. Inserting this in (\ref{free-index}),
the index becomes the inverse of the Dedekind eta function $\eta(\tau)$, where
$\tau$ is given by $q=e^{2\pi i\tau}$.
Thus, for our 5d approach to have any chance to capture the `free' $U(1)$ partition
function, or the partition function for the decoupled degrees coming from overall $U(1)$,
we should be able to find from a 5d calculation a multiplicative factor
$\frac{1}{\eta(\tau)}$. Using the modular property of $\eta(\tau)$,
one obtains the following expansion for small $\beta$:
\begin{equation}\label{overall-weak}
Z_{U(1)}=\left(\frac{\beta}{2\pi}\right)^{\frac{1}{2}}
e^{\frac{\pi^2}{6\beta}}\prod_{k=1}^\infty\frac{1}{1-e^{-\frac{4\pi^2k}{\beta}}}\ .
\end{equation}
This takes the form of a non-perturbative expansion in $\beta$.
Motivated by the above findings,
let us first consider what kind of corrections can appear to our 5d partition function.
From the saddle point equations (\ref{saddle}), Yang-Mills instanton configurations are
allowed on the $\mathbb{CP}^2$ base of $S^5$ in Hopf fibration. In our normalization for
$g_{YM}$, the classical action for $k$ instantons on the $\mathbb{CP}^2$ base is given
by\footnote{If we call the instantons of our saddle points to be self-dual, the Kahler
2-form $J_{\mu\nu}$ of $\mathbb{CP}^2$ is anti-self-dual. So the embedding of $J$ into
Abelian subgroup as $F_{\mu\nu}\sim J_{\mu\nu}$ \cite{Witten:2003ya} is excluded in our problem.}
\begin{equation}
\frac{1}{4g_{YM}^2}\int_{\mathbb{CP}^2}{\rm tr}(F_{\mu\nu}F^{\mu\nu})
=\frac{4\pi^2k}{g_{YM}^2}\ .
\end{equation}
This naturally yields the relation $\frac{4\pi^2}{g_{YM}^2}=\frac{1}{r_1}=\frac{2\pi}{r\beta}$
with $\beta\equiv\frac{2\pi r_1}{r_5}$. We introduced this in the introduction and also
used it in section 3.1. Despite the absence
of the physical D0-brane particle picture, we are suggesting that Euclidean D0-brane
loops which wrap a (possibly contractible) cycle, which we formally regard as time,
would provide the Kaluza-Klein `momentum' (in the sense of Fourier wavenumber) along
the extra circle. More precisely, the Euclidean D0-brane (or instanton)
action on $S^5$ is
\begin{equation}
S_0=\frac{4\pi^2 k}{g_{YM}^2}\cdot 2\pi r=\frac{4\pi^2k}{\beta}\ .
\end{equation}
$2\pi r$ comes from the integration of the Lagrangian over the Hopf fiber direction $y$.
So the non-perturbative correction should take the form of
\begin{equation}\label{instanton-general}
Z=\sum_{k=0}^\infty Z_ke^{-\frac{4\pi^2 k}{\beta}}\ ,
\end{equation}
which fits completely well with (\ref{overall-weak}), apart from the prefactor
$e^{\frac{\pi^2}{6\beta}}$ and $\left(\frac{\beta}{2\pi}\right)^{\frac{1}{2}}$.
To explain the last two factors, let us first turn to $e^{\frac{\pi^2}{6\beta}}$.
The presence of this factor can be understood by noticing that there could be a constant
shift to the supersymmetric actions on $S^5$ without modifying any symmetry. For instance,
\cite{Vafa:1994tf} emphasized in the context of topologically twisted 4d $\mathcal{N}=4$
SYM that there could be couplings of $g_{YM}$ ($\sim\beta$ in our case) to the background
curvature. In our case, on $S^5$, we may have constant couplings like
\begin{equation}
\frac{\alpha}{g_{YM}^2}\int_{S^5}d^5x\sqrt{g}R^2\ ,
\end{equation}
where $R$ is the Riemann scalar curvature of $S^5$ and $\alpha$ is a
dimensionless constant. With a suitable coefficient $\alpha$, this term provides
the factor $e^{\frac{\pi^2}{6\beta}}$. As we have our freedom (or ambiguity) in 5d to
choose our theory on $S^5$, without spoiling any 5d symmetry, we implicitly assume
a certain constant shift of the action of the above form, so that the desired factor
comes out. As we are assuming the completeness of 5d SYM description, at least in
the BPS sector, such curvature couplings are restricted to $\mathcal{O}(\beta^{-3})$,
$\mathcal{O}(\beta^{-2})$, $\mathcal{O}(\beta^{-1})$ in general. So this is fixing a mild
ambiguity to get much more information on the 6d physics.
Now we turn to $\left(\frac{\beta}{2\pi}\right)^{\frac{1}{2}}$. We first note that
the perturbative partition function (\ref{perturbative}) at $N=1$ is just
$\left(\frac{\beta}{2\pi}\right)^{\frac{1}{2}}$. We take this factor from the perturbative
part and combine it with the instanton contribution of the form
(\ref{instanton-general}), to provide a desired factor in (\ref{overall-weak}).
Multiplying this factor from the perturbative part, now the non-perturbative series
(\ref{instanton-general}) takes the form of an index, supposing that the coefficients are
chosen to make (\ref{overall-weak}). So we find that, even for the $U(1)$ theory, the
structure of perturbative/non-perturbative contributions to the $S^5$ partition function
confronts and passes quite a nontrivial consistency test for it to be an index.
Let us emphasize at this point that Abelian instantons, which we expect to account for
(\ref{overall-weak}), are not completely well defined purely within field theories,
as they often come with zero sizes which should be regarded as singular instantons.
In fact, non-Abelian instantons (at least in flat space) also have singularities in their
moduli spaces which correspond to small instantons. However, small instantons are often
important to understand various issues in string theory \cite{Witten:1995gx}. Often, giving
non-commutativity to the field theory makes the instanton moduli space smooth, and also makes Abelian instantons to be regular field theory solitons \cite{Nekrasov:1998ss}. This may
correspond to a (perhaps mild) UV completion of the 5d quantum field theory.
With these motivations, we now turn to the non-Abelian instanton corrections.
We only discuss the case with $U(N)$ gauge group. We claim that the full $U(N)$ non-perturbative partition function takes
the form
\begin{equation}
Z(\beta)=Z_{\rm pert}(\beta)Z_{\rm inst}(\beta)\ ,
\end{equation}
where $Z_{\rm pert}$ is given in the previous subsection, and
\begin{equation}\label{U(N)-instanton}
Z_{\rm inst}=\left[Z_{\rm inst}^{U(1)}\right]^N=
e^{\frac{N\pi^2}{6\beta}}\prod_{k=1}^\infty\frac{1}{\left(1-e^{-\frac{4\pi^2k}{\beta}}
\right)^N}=\eta(\tau)^{-N}
\end{equation}
with $\tau\equiv\frac{2\pi i}{\beta}$ (namely, $e^{2\pi i\tau}\equiv e^{-\frac{4\pi^2}{\beta}}$).
(\ref{U(N)-instanton}) takes
the general form of (\ref{instanton-general}), again with a suitable coupling to
the background curvature for the $e^{\frac{N\pi^2}{6\beta}}$ factor.
The proof of (\ref{U(N)-instanton}) will be presented in \cite{Kim:2012qf}, with a
generalization to the squashed $S^5$. In this paper, we shall present several nontrivial
pieces of evidence and implications of this result.
Before studying the physics of (\ref{U(N)-instanton}), let us note
that the instanton partition functions are usually very simple in theories with $16$ SUSY.
In many important examples, the partition functions are
either $1$ or just functions of the coupling $g_{YM}^2$. On $\mathbb{R}^4$ and
$\mathbb{R}^4\times S^1$, \cite{Nekrasov:2002qd,Nekrasov:2003rj} calculates the instanton
partition function in the so-called Omega deformation $\epsilon_1$, $\epsilon_2$, which
roughly speaking compactifies the non-compact $\mathbb{R}^4$ part of the instanton moduli. When we consider the instanton partition function of maximal SYM, the following
simplifications appear. Although the instanton partition function depend on the VEV of a
scalar (similar to our saddle point value for $\phi$) in generic gauge theories with $8$
SUSY, this dependence disappears at some special values of $\epsilon_1,\epsilon_2$
with $16$ SUSY. To explain some important cases, we first note that when
$\epsilon_1\!=\!\epsilon_2$, the instanton partition function just becomes $1$. This was a
crucial element in showing that the $S^4$ partition function for the $\mathcal{N}=4$ SYM
becomes a Gaussian matrix model with
$Z_{\rm pert}=Z_{\rm inst}=1$ \cite{Pestun:2007rz}. On the other hand, with
anti-self-dual Omega background with $\epsilon_1=-\epsilon_2\equiv\hbar$, the instanton
partition function for the $\mathcal{N}=4$ theory becomes independent of the remaining $\hbar$, and depends on $g_{YM}^2$ only. The partition function on the anti-self-dual
Omega background becomes \cite{Nekrasov:2003rj}
\begin{equation}
Z_{\rm inst}=\frac{1}{\eta(\tau)^N}\ \ \ \ \ \ \left(
\tau=\frac{\theta}{2\pi}+\frac{4\pi^2 i}{g_{YM}^2}\right)\ ,
\end{equation}
apart from the possible overall shift for the instanton number in the topologically
trivial sector, like those we discussed above. In particular, the result is the same
both for instantons on $\mathbb{R}^4$ or $\mathbb{R}^4\times S^1$. The instanton correction
(\ref{U(N)-instanton}) we propose on $S^5$ is basically the same as the result on
$\mathbb{R}^4$ or $\mathbb{R}^4\times S^1$, in anti-self-dual Omega background. The
relevance of these simpler cases to (\ref{U(N)-instanton}) is explained in \cite{Kim:2012qf}.
In this paper, we collect some evidence in favor of (\ref{U(N)-instanton}) and
discuss its physical implications.
Firstly, this yields the desired index (\ref{overall-weak}) for $N=1$.
Secondly, the nonperturbative result (\ref{U(N)-instanton}) can be dualized
for $\beta\gg 1$ to
\begin{equation}\label{instanton-dual}
Z_{\rm inst}=\left(\frac{2\pi}{\beta}\right)^{N/2}\eta(i\beta/2\pi)^{-N}\ ,
\end{equation}
using the S-dual modular property of the eta function. The factor $\left(\frac{\beta}{2\pi}\right)^{-N/2}$ in (\ref{instanton-dual}) combines with
a factor $\left(\frac{\beta}{2\pi}\right)^{N/2}$ in the perturbative partition function
(\ref{perturbative}) which prevents an index interpretation of (\ref{perturbative}).
Moving it to the non-perturbative part and combining this with (\ref{U(N)-instanton}),
one finds that both perturbative and non-perturbative parts take the form of an index,
since
\begin{equation}
\left(\frac{\beta}{2\pi}\right)^{N/2}Z_{\rm inst}=\frac{1}{\eta(i\beta/2\pi)^N}
=q^{-\frac{N}{24}}\prod_{n=1}^\infty\frac{1}{(1-q^n)^N}\ ,
\end{equation}
where we defined $q\equiv e^{-\beta}$.
So the structure (\ref{instanton-general}) of instanton expansion conspires well
with the provided prefactor in the perturbative part, to make the whole expression
$Z_{\rm pert}Z_{\rm inst}$ an index. It is somewhat curious to find that the
perturbative and non-perturbative parts have to combine for the 5d SYM to tell us the 6d
physics consistently.\footnote{This sounds a bit similar to the failure of perturbative
finiteness of maximal SYM \cite{Bern:2012di}. The only chance for this theory to be UV
complete is then by combining the full perturbative/non-perturbative effects at the
cutoff scale where the distinction between the two becomes meaningless \cite{Lambert:2012qy}. Although we do not see any serious divergence in our SUSY path integral,
the consistency of 6d physics still requires us to combine the two.}
Most importantly, we shall now show that the non-perturbative completion (\ref{U(N)-instanton}) perfectly agrees with the large $N$ index that we know from
the gravity dual on $AdS_7\times S^4$. Before combining the instanton correction (\ref{U(N)-instanton}), the perturbative
part (\ref{perturbative}) shows a very strange large $N$ behavior. Let us consider the
part which gives the degeneracy information:
\begin{equation}\label{pert-expand}
\prod_{n=1}^{N-1}(1-q^n)^{N-n}=1-(N-1)q^1+\frac{N^2-5N+6}{2}q^2
-\frac{N^3-12N^2+35N-36}{6}q^3+\cdots\ .
\end{equation}
The low energy degeneracy at large $N$ is so large that this part alone will not have
a sensible large $N$ limit: especially it cannot have a large $N$ gravity dual on $AdS$,
which exhibits a low energy spectrum which is completely independent of $N$.
Combining $Z_{\rm inst}$ with the perturbative contribution, one obtains
\begin{equation}\label{full-index}
Z=Z_{\rm pert}Z_{\rm inst}=e^{\frac{N(N^2-1)\beta}{6}}\prod_{n=1}^{N-1}(1-e^{-n\beta})^{N-n}
\cdot e^{\frac{N\beta}{24}}\prod_{n=1}^\infty\frac{1}{(1-e^{-n\beta})^N}\ .
\end{equation}
The large $N$ index, apart from the zero point
energy part, is given by the MacMahon function,
\begin{equation}\label{large-N-QFT}
Z_{N\rightarrow\infty}=\prod_{n=1}^\infty\frac{1}{(1-q^n)^n}\ .
\end{equation}
Again, we used $q\equiv e^{-\beta}$.
We see that the contribution of $\mathcal{O}(N)$ fermionic `letters' at low energy
in (\ref{pert-expand}) cancels with the $\mathcal{O}(N)$ bosonic letter contributions,
leaving $\mathcal{O}(1)$ low energy degeneracy.
\begin{table}[t!]
$$
\begin{array}{c|ccc|c}
\hline &\epsilon&SO(6)&SO(5)&{\rm boson/fermion}\\
\hline p\geq 1&2p&(0,0,0)&(p,0)&{\rm b}\\
p\geq 1&2p+\frac{1}{2}&(\frac{1}{2},\frac{1}{2},\frac{1}{2})
&(p-\frac{1}{2},\frac{1}{2})&{\rm f}\\
p\geq 2&2p+1&(1,0,0)&(p-1,1)&{\rm b}\\
p\geq 3&2p+\frac{3}{2}&(\frac{1}{2},\frac{1}{2},-\frac{1}{2})
&(p-\frac{3}{2},\frac{3}{2})&{\rm f}\\
\hline \cdot&\frac{7}{2}&(\frac{1}{2},\frac{1}{2},-\frac{1}{2})
&(\frac{1}{2},\frac{1}{2})&{\rm b\ (fermionic\ constraint)}\\
\hline
\end{array}
$$
\caption{BPS Kaluza-Klein fields of $AdS_7\times S^4$ supergravity}\label{sugra}
\end{table}
Now we study the same index in the large $N$ limit from the $AdS_7\times S^4$
supergravity, giving the weight $q^{\epsilon-R_1}$ to the low energy gravity states.
Again choosing a particular $Q$, $S$ and viewing our index as the unrefined version
of the superconformal index
associated with $Q,S$, it suffices for us to consider the contribution from gravity
states preserving these SUSY. The Kaluza-Klein field contents are given in
\cite{Bhattacharya:2008zy}, and we only list the BPS fields in Table \ref{sugra}.
Collecting all the contributions, one obtains the single particle gravity index
\begin{eqnarray}
I_{\rm sp}(q)&=&\frac{1}{(1-q)^3}\left[\sum_{p=1}^\infty\sum_{n=0}^pq^{2p-n}
-3\sum_{p=1}^\infty\sum_{n=1}^pq^{2p+1-n}+3\sum_{p=2}^\infty\sum_{n=1}^{p-1}q^{2p+1-n}
-\sum_{p=3}^\infty\sum_{n=1}^{p-2} q^{2p+1-n}+q^3\right]\nonumber\\
&=&\frac{q}{(1-q)^2}=q+2q^2+3q^3+4q^4+\cdots\ .
\end{eqnarray}
The multiparticle exponent of $I_{\rm sp}$ yields the MacMahon function
\begin{equation}
I_{\rm mp}(q)=\exp\left[\sum_{n=1}^\infty\frac{1}{n}I_{\rm sp}(q^n)\right]
=\prod_{n=1}^\infty\frac{1}{(1-q^n)^n}
\end{equation}
as the multiparticle gravity index on $AdS_7\times S^4$, precisely agreeing
with the result (\ref{large-N-QFT}) from 5d gauge theory calculation.
It is curious to find that the non-perturbative correction (\ref{U(N)-instanton})
yields an $\mathcal{O}(N)$ correction to the `index Casimir energy' obtained from
the perturbative part. One obtains
\begin{equation}
\epsilon_0=(\epsilon_0)_{\rm pert}+(\epsilon_0)_{\rm inst}=
-\frac{N(N^2-1)}{6}-\frac{N}{24}\ .
\end{equation}
It would be curious to see if this can be understood as a combination
of various anomaly coefficients of the 6d $(2,0)$ theory \cite{Harvey:1998bx},
similar to what we observe for the 4d Casimir energy in appendix B.
Finally, MacMahon function that we obtained at large $N$ is well-known as
the generating function for the 3 dimensional Young diagrams. Curiously, our finite
$N$ index (\ref{full-index}) is the generating function for the 3d Young diagrams
with their heights being no longer than $N$. This index also coincides with
the vacuum character of the $W_N$ algebra, apart from a factor of eta function
\cite{Gaberdiel:2010ar}. It should be interesting to seek for the physical meanings of
these apparently surprising relations, if any.\footnote{We thank Amihay Hanany for
discussions which led us to the observation on the restricted 3d Young
diagrams. Also, we thank Rajesh Gopakumar for explaining the coincidence with
the $W_N$ vacuum character.}
\subsection{Generalizations}
One can easily modify the localization calculus for the maximal SYM on $S^5$
to include two chemical potentials conjugate to $\epsilon-R_1$ and $\epsilon-R_2$.
One has to calculate the $S^5$ partition function for the theory with $8$ SUSY,
with two parameters $\beta\sim g_{YM}^2$ and $\Delta$.
By following the calculation similar to the case with $\Delta=1$ in section 3.1
and appendix A, one obtains similar cancelations between non-BPS modes and finds
the following integrand of the Hermitian matrix integral.
Firstly, the classical contribution and the determinant from the vector multiplet
part does not change compared to the analysis in the previous section. The
hypermultiplet contribution changes as
\begin{eqnarray}\label{hyper-det}
&&\prod_{\alpha\in{\rm root}}\prod_{k=0}^\infty\left(k+1+\Delta+ir\alpha(\phi)
\right)^{-\frac{(k+1)(k+2)}{2}}
\left(k+2-\Delta+ir\alpha(\phi)\right)^{-\frac{(k+1)(k+2)}{2}}\nonumber\\
&&=\prod_{\alpha\in{\rm root}}
\prod_{k=1}^\infty\left(k-1+\Delta+ir\alpha(\phi)\right)^{-\frac{k^2-k}{2}}
\left(k+1-\Delta+ir\alpha(\phi)\right)^{-\frac{k^2+k}{2}}\ .
\end{eqnarray}
Notice that our previous partition function at $\Delta=1$ is same as that with
$\Delta=0$, as the two points just exchange the roles of $R_1$ and $R_2$.\footnote{This
essentially gives the determinant for a hypermultiplet in a real representation, if one replaces $\alpha(\phi)$ in (\ref{hyper-det}) by $\mu(\phi)$, where $\mu$ runs over
the weights in the representation. For a complex representation, one has to multiply
a similar factor with $\mu(\phi)$ replaced by $-\mu(\phi)$, and then take square root to
get the determinant \cite{Kim:2012qf}.} The full
integrand, apart from the Gaussian factor and the Vandermonde measure, is given by
\begin{equation}\label{general-determinant}
\prod_{\alpha\in{\rm root}}
\prod_{k=1}^\infty\frac{(k+ir\alpha(\phi))^{k^2+2}}
{\left(k-1+\Delta+ir\alpha(\phi)\right)^{\frac{k^2-k}{2}}
\left(k+1-\Delta+ir\alpha(\phi)\right)^{\frac{k^2+k}{2}}}\ .
\end{equation}
The exact integration with Gaussian measure and the Vandermonde determinant
does not seem to be as simple as our previous example with $16$ SUSY.
The above infinite product requires regularization. Various factors in
(\ref{general-determinant}) are all regularized in the literatures using zeta
function regularization. One obtains (we use the fact that adjoint representation
is real to obtain the second line)
\begin{eqnarray}
&&\prod_{\alpha\in{\rm root}}
\prod_{k=1}^\infty(k+ir\alpha(\phi))^2\cdot\frac{(k+ir\alpha(\phi))^{k^2}}
{\left(k-1+\Delta+ir\alpha(\phi)\right)^{\frac{k^2}{2}}
\left(k+1-\Delta+ir\alpha(\phi)\right)^{\frac{k^2}{2}}}\cdot
\left(\frac{k-1+\Delta+ir\alpha(\phi)}{k+1-\Delta+ir\alpha(\phi)}\right)^{\frac{k}{2}}\nonumber\\
&&\longrightarrow\prod_{\alpha\in{\rm root}}\frac{2\pi\sinh(\pi r\alpha(\phi))}{\pi r\alpha(\phi)}
\cdot e^{\frac{1}{2}f(ir\alpha(\phi))-\frac{1}{2}f(1-\Delta+ir\alpha(\phi))}\cdot e^{-\frac{1}{2}\ell(1-\Delta+ir\alpha(\phi))}\ ,
\end{eqnarray}
where the function $f(x)$ (even in $x\rightarrow -x$) is given by \cite{Kallen:2012cs}
\begin{equation}
f(x)=\frac{i\pi x^3}{3}+x^2\log(1-e^{-2\pi ix})+\frac{ix{\rm Li}_2(e^{-2\pi i x})}{\pi}
+\frac{{\rm Li}_3(e^{-2\pi ix})}{2\pi^2}-\frac{\zeta(3)}{2\pi^2}\ ,
\end{equation}
and the (odd) function $\ell(x)$ is given by \cite{Jafferis:2010un}
\begin{equation}
\ell(x)=\frac{i\pi x^2}{2}-x\log(1-e^{2\pi ix})
+\frac{i{\rm Li}_2(e^{2\pi ix})}{2\pi}-\frac{i\pi}{12}\ .
\end{equation}
The matrix integral is given by
\begin{equation}
\frac{1}{|W|}\int d\lambda e^{-\frac{2\pi^2{\rm tr}(\lambda^2)}{\beta}}
\prod_{\alpha\in{\rm root}}2\sinh(\pi\alpha(\lambda))
e^{\frac{1}{2}f(i\alpha(\lambda))-\frac{1}{2}f(1-\Delta+i\alpha(\lambda))
-\frac{1}{2}\ell(1-\Delta+i\alpha(\lambda))}\ ,
\end{equation}
where $\lambda\equiv r\phi$.
We first note that the limit $\beta\rightarrow\infty$, $\Delta\rightarrow 2$ with
fixed $\beta(2-\Delta)>0$, towards the half-BPS partition function, is quite singular
and may drastically change the nature of the matrix integral. Firstly, the strong coupling
limit $\beta\rightarrow\infty$ takes the Gaussian measure to $1$. Secondly, the second
term in the denominator, at $k=1$ yields a factor $1-\Delta+ir\alpha(\phi)$ which at $\Delta=2$
completely cancels the zeros in the $\sinh$ measure. So in this limit, there are no short
distance repulsions between different eigenvalues.
We still have a parameter $\beta_H=\beta(2-\Delta)$ which gives the fugacity $q=e^{-\beta_H}$
of the half-BPS partition function. One can thus consider calculating the matrix integral
in a series expansion of $\beta_H$, and compare with the expected half-BPS partition function.
As $(q_1q_2)^{\epsilon_0}$ becomes infinity with negative $\epsilon_0$, we expect to have
a divergent prefactor multiplying the conventional half-BPS partition function.
For simplicity, let us consider the $U(N)$ half-BPS partition function.
The $U(N)$ partition function for half-BPS states is given by \cite{Bhattacharyya:2007sa}
\begin{equation}\label{half-BPS}
Z=\prod_{n=1}^N\frac{1}{1-q^n}\ ,
\end{equation}
up to a divergent multiplicative factor, with $q=e^{-\beta_H}$ as defined in the
previous paragraph. It will be interesting to see whether our result, supplemented by the
instanton correction of \cite{Kim:2012qf}, reproduces (\ref{half-BPS}).
\section{Discussions}
In this paper, we explored the possibility that partition functions of SYM on $S^5$
could capture the indices of the 6d $(2,0)$ theory on $S^5\times S^1$. The 5d field theories
are carefully chosen, by first studying the Scherk-Schwarz reductions of Abelian $(2,0)$
theories on $S^5\times S^1$ on the circle, and then trying to generalize to non-Abelian theories on $S^5$. We showed that the partition function for the maximal SYM on $S^5$
captures the physics of the 6d $(2,0)$ theory in a surprisingly accurate and detailed manner.
Firstly, the partition function takes the form of an index, which from a naive 5d perspective
has no reason to be true. Generalizing the idea to other 5d theories on $S^5$, the requirement that the partition function take the form of an index could severely restrict the class of
theories having a 6d UV fixed point. For instance, it should be desirable to further study
the index for the $(2,0)$ theory with more complicated chemical potentials, from 5d gauge
theories with as little as $2$ SUSY (those preserved by the most refined superconformal
index). Also, studying our partition function for other gauge groups will also be interesting. One can also study a 5d reduction of the 6d $(1,0)$ superconformal theories.
We find that our index captures the $N^3$ scaling of the degrees of freedom through what we called
the `index Casimir energy,' which is a Casimir energy like quantity appearing in
the index. It should be interesting to see if this is an observable which is worth further
studies. Also, possible relations to other suggested measures of degrees of freedom could
be interesting. Derivation of our index Casimir energy from the gravity dual should
also be very important.
We showed that the index calculated
from the 5d maximal SYM with $U(N)$ gauge group completely agrees with the supergravity
index on $AdS_7\times S^4$ in the large $N$ limit. We regard this as quite a nontrivial
signal that our approach is on the right track. Similar successful matching
of instanton partition function on $\mathbb{R}^4\times S^1$ and the DLCQ supergravity
index on $AdS_7\times S^4$ was found in \cite{Kim:2011mv}.
The perturbative partition function that we find for maximal SYM on $S^5$ turns out
to be identical to the pure Chern-Simons partition function on $S^3$. Possible physical
connections between the two observables are not clear to us at the moment.
However, inspired by the fact that the Jones polynomial and other topological invariants
were studied by Wilson loop observables in Chern-Simons theories \cite{Witten:1988hf},
one may ask if the Wilson loops in 5d gauge theories can play interesting roles as well.
Earlier works on Wilson loops in 5d SYM include \cite{Young:2011aa}.
\vskip 0.5cm
\hspace*{-0.8cm} {\bf\large Acknowledgements}
\vskip 0.2cm
\hspace*{-0.75cm} We are grateful to Dongmin Gang, Eunkyung Koh, Kimyeong Lee, Sangmin Lee,
Costis Papageorgakis, Jaemo Park, Jeong-Hyuck Park and Shuichi Yokoyama for discussions, and
especially to Jungmin Kim for collaboration and comments. S.K. thanks Sunil Mukhi and
Alessandro Tomasiello for discussions on related subjects, and Marcos Marino for discussions
on Casimir energies a while ago. We are also grateful for many valuable comments on the first
version of this paper, especially those from Rajesh Gopakumar, Amihay Hanany,
Kazuo Hosomichi, Daniel Jafferis, Igor Klebanov, Costis Papageorgakis, Leonardo Rastelli,
Yuji Tachikawa, Martin Wolf and Maxim Zabzine. This work is supported by the BK21
program of the Ministry of Education, Science and Technology (SK), the National Research
Foundation of Korea (NRF) Grants No. 2010-0007512 (HK, SK), 2012R1A1A2042474 (SK),
2012R1A2A2A02046739 (SK) and 2005-0049409 through the Center for Quantum Spacetime
(CQUeST) of Sogang University (SK).
|
{
"timestamp": "2013-05-17T02:01:59",
"yymm": "1206",
"arxiv_id": "1206.6339",
"language": "en",
"url": "https://arxiv.org/abs/1206.6339"
}
|
\section{Introduction}
The problem of bounding the size of a code depends heavily on the code family that we are considering. In this paper we are interested in three types of codes: linear codes, systematic codes and non-linear codes. Referring to the subsequent section for rigorous definitions, with {\bf linear codes} we mean linear subspaces of $(\FF_q)^n$, while with {\bf non-linear codes} we mean (following consolidated tradition) codes that are not necessarily linear. In this sense, a linear code is always a non-linear code, while a non-linear code may be a linear code, although it is unlikely. Systematic codes form a less-studied family of codes, whose definition is given in the next section.
Modulo code equivalence all (non-zero) linear codes are systematic and all systematic codes are non-linear. In some sense, systematic codes stand in the middle between linear codes and non-linear codes. The size of a systematic code is directly comparable with that of a linear code, since it is a power of the size of $\FF_q$.
In this paper we are interested only in {\bf theoretical bounds}, that is, bounds on the size of a code that can be obtained by a closed-formula expression, although other algorithmic bounds exist (e.g. the Linear Programming bound \cite{CGC-cd-art-Dels73}).
The algebraic structure of linear codes would suggest the knowledge of a high number of bounds strictly for linear codes, and only a few bounds for the other case. Rather surprisingly, the academic literature reports only one bound for linear codes, the Griesmer bound (\cite{CGC-cd-art-griesm60}), no bounds for systematic codes and many bounds for non-linear codes. Among those, we recall: the Johnson bound (\cite{CGC-cd-art-john62},\cite{CGC-cd-art-john71},\cite{CGC-cd-book-huffmanPless03}), the Elias-Bassalygo bound (\cite{CGC-cd-art-bass65},\cite{CGC-cd-book-huffmanPless03}), the Levenshtein bound (\cite{CGC-cd-art-lev98}), the Hamming (Sphere Packing) bound and the Singleton bound (\cite{CGC-cd-book-pless98}), and the Plotkin bound (\cite{CGC-cd-art-plotk60}, \cite{CGC-cd-book-huffmanPless03}).\\
Since the Griesmer bound is specialized for linear codes, we would expect it to beat the other bounds,
but even this does not happen, except in some cases. So we have an unexpected situation where the bounds holding for the more general case are numerous and beat bounds holding for the specialized case.\\
\indent
In this paper we present one (closed-formula) bound (Bound ${\mathcal A}$) for a large part of non-linear codes (including all systematic codes), which is an improvement of a bound by Litsyn and Laihonen in \cite{CGC-cod-art-litlai98}. The crux of our improvement is a preliminary
result presented in Section \ref{secMainThm}, while in Section 4 we are able to prove Bound ${\mathcal A}$ .
Then we restrict Bound ${\mathcal A}$ to the systematic/linear case and compare it with all the before-mentioned bounds by computing their values for a large set of parameters (corresponding to about one week of computations with our computers). Our findings are in favour of Bound ${\mathcal A}$ and are reported in Section 5. For large values of $q$, our bound provides the best value in the majority of cases.\\
The only bound that we never beat is Plotkin's, but its range is very small (the distance has to be at least $d > n(1-1/q)$) and the cases falling in this range are a tiny portion with large $q$'s.\\
\indent
For standard definitions and known bounds, the reader is directed to the original articles or to any recent good book, e.g. \cite{CGC-cd-book-huffmanPless03} or \cite{CGC-cd-book-pless98}.
\section{Preliminaries}
\label{prel}
We first recall a few definitions.\\
Let $\FF_q$ be the finite field with $q$ elements, where $q$ is any power of any prime.\\
Let $n\geq k\geq 1$ be integers.
Let $C \subseteq \FF_q^n, C \ne \emptyset$. We say that $C$ is an $(n,q)$ {\bf code}. Any $ c\in C$ is a {\bf word}. Note that here and afterwards a ``code'' denotes what is called a ``non-linear code'' in the introduction. \\
Let $\phi:(\mathbb{F}_q)^k \rightarrow (\mathbb{F}_q)^n$ be an injective function and let $C = {\rm Im}(\phi)$. We say that $C$ is an $(n,k,q)$ {\bf systematic code} if $\phi(v)_i = v_i$ for any $v \in (\FF_q)^k$ and any
$1 \le i \le k$.
If $C$ is a vector subspace of $(\mathbb{F}_q)^n$, then $C$ is a {\bf linear} code. Clearly any non-zero linear code is equivalent to a systematic code. \\
From now on, $\FF$ will denote $\FF_q$ and $q$ is understood.\\
We denote with $\d(c,c')$ the {\bf (Hamming) distance} of two words $c,c' \in C$, which is the number of different components between $c$ and $c'$. We denote with $d$ a number such that $1 \le d \le n$ to indicate the {\bf distance of a code}, which is $d = \min_{c,c' \in C,c \ne c'}\{\d(c,c')\}$. Note that a code with only one word has, by convention, distance equal to infinity. The whole $\FF^n$ has distance $1$, and $d = n$ in a systematic code is possible only if $k=1$.\\
From now on, $n,k$ are understood.
\begin{definition}\label{ball}
Let $l,\,m \in \NN$ such that $l \leq m$. In $\mathbb{F}^m$, we denote by $B_x(l,m)$ the set of vectors with distance from the word $x$ less than or equal to $l$, and we call it the \textbf{ball}
centered in $x$ of radius $l$.\\
For conciseness, $B(l,m)$ denotes the ball centered in the zero vector.
\end{definition}
\noindent
Obviously, $B(l,m)$ is the set of vectors of weight less than or equal to $l$ and
$$
|B(l,m)| \; = \; \sum_{j=0}^{l} \binom{m}{j}(q-1)^j.
$$
We also note that any two balls having the same radius over the same field contain the same number of vectors.
\begin{definition}
The number $A_q (n, d)$ denotes the maximum number of words in a code over $\FF_q$ of length $n$ and distance $d$.
\end{definition}
\section{A first result for a special code family}\label{secMainThm}
The maximum number of words in an $(n,d)$ code can be smaller than $A_q (n, d)$ if we have extra constraints on the weight of words.
The following result is an example and it will be instrumental in the proof of Bound ${\mathcal A}$.
\begin{theorem}
\label{thmEPS}
Let $C$ be an $(n,d)$-code over $\FF^n$. Let $\epsilon \ge 1$ be such that for any $c \in C$ we have $\mathrm{w}(c) \ge d+\epsilon$.
Then
$$ |C| \le A_q(n,d) - \frac{|B(\epsilon,n)|}{|B(d-1,n)|}$$
\end{theorem}
\begin{proof}
$C$ belongs to the set of all codes with distance $d$ and contained in $\FF^{n} \setminus B_0(d+\epsilon-1,n)$.
Let $D$ be any code of the largest size in this set, then
\begin{align}\label{eq1}
|C| \le |D|
\end{align}
Clearly, any word $c$ of $D$ has weight $\mathrm{w}(c)\geq d+\epsilon$.
Consider also $\bar{D}$, the largest code over $\FF^{n}$ of distance $d$ such that $D \subseteq \bar{D}$.
By definition, the only words of $\bar{D}$ of weight greater than $d+\epsilon-1$ are those of $D$, while all other words of $\bar{D}$ are confined to the ball $B_0(d+\epsilon-1,n)$.
Thus
\begin{align}\label{eq0}
|C| \le |D| \le |\bar{D}| \le A_q(n,d)
\end{align}
and
$$\bar{D} \setminus D \subseteq B_0(d+\epsilon-1,n)$$
Let $\rho = d-1$ and $r = d+\epsilon-1$, so that $r-\rho = \epsilon$, and let $N = \bar{D} \cap B_0(r,n)$. We have:
\begin{align}\label{eq2}
D = \bar{D} \setminus N, \qquad |D| = |\bar{D}| - |N|
\end{align}
We are searching for a lower bound on $|N|$, in order to have an upper bound on $|D|$.
We start with proving
\begin{align}\label{eqBx}
B_0(r-\rho,n) \subseteq \bigcup_{x \in N}B_{x}(\rho,n)
\end{align}
Consider $y \in B_0(r-\rho,n)$. If for all $x \in N$ we have that $y \notin B_x(\rho,n)$, then $y$ is a vector whose distance from $N$ is at least $\rho+1$.
Since $y \in B_0(r-\rho,n)$, also its distance from $\bar{D}\setminus N$ is at least $\rho+1$. Therefore, the distance of $y$ from the whole $\bar{D}$ is at least
$\rho+1=d$ and so we can obtain a new code $\bar{D} \cup \{y\}$ containing $D$ and with distance $d$, contradicting
the fact that $|\bar{D}|$ is the largest size for such a code in $\FF^{n}$. So, (\ref{eqBx}) must hold.
A direct consequence of (\ref{eqBx}) is
\begin{align*}
& |N|\cdot|B_{x}(\rho,n)| \ge |B_0(r-\rho,n)| \,,
\end{align*}
which gives
\begin{align}\label{eq3}
|N| \ge \frac{|B_0(r-\rho,n)|}{|B_{x}(\rho,n)|} = \frac{|B_0(\epsilon,n)|}{|B_{x}(d-1,n)|}
\end{align}
Using (\ref{eq1}), (\ref{eq0}), (\ref{eq2}) and (\ref{eq3}), we obtain the desired bound:
\begin{align*}
|C| \le |D| &= |\bar{D}| - |\bar{D} \cap B_0(d+\epsilon-1,n)| \\
& \le A_q(n,d) - \frac{|B_0(\epsilon,n)|}{|B_{x}(d-1,n)|}
\end{align*}
\end{proof}
\section{An improvement of the Litsyn-Laihonen bound}
In 1998 Litsyn and Laihonen proved a bound for non-linear codes: \\ Theorem~1 of \cite{CGC-cod-art-litlai98}, which we write with our notation as follows.
\begin{theorem}[Litsyn-Laihonen bound]
\label{thmLLboundold}
Let $1 \le d \le n$. Let $t \in \mathbb{N}$ be such that $t \le n-d$. Let $r \in \NN$ be such that $d-2r \le n-t$, $0 \le r \le t$ and $0 \le r \le \frac{1}{2}d$. Then
$$A_q(n,d) \le \frac{q^t}{|B(r,t)|}A_q(n-t,d-2r)$$
\end{theorem}
Let $C$ be an $(n,d)$-code over $\FF$, let $k=\lfloor \log_q(|C|) \rfloor$. We say that $C$ is \emph{systematic-embedding} if $C$ contains a systematic code $D$ with size $|D|=q^k$. Obviously a systematic code is systematic-embedding with $D=C$. Moreover if the code is linear then $k$ is the dimension of $C$.\\
All known families of maximal codes are either systematic codes or systematic-embedding codes (see e.g., \cite{CGC-cd-art-preparata}, \cite{CGC-kerd72} and \cite{CGC-cd-art-goethal}).
\\
We are ready to show a strengthening of Theorem \ref{thmLLboundold} restricted to systematic-embedding codes: Bound ${\mathcal A}$. In the proof we follow initially the outline of the proof of \cite[Theorem 1]{CGC-cod-art-litlai98} and then we apply Theorem \ref{thmEPS}.
\begin{theorem}[Bound ${\mathcal A}$]
\label{thmLLbound}
Let $1 \le d \le n$. Let $t \in \mathbb{N}$ be such that $t \le n-d$. Let $r \in \NN$ be such that $d-2r \le n-t$, $0 \le r \le t$ and $0 \le r \le \frac{1}{2}d$. Suppose that there is an $(n,d)$-code $C$ over $\FF$ such that $|C|=A_q(n,d)$ and $C$ is systematic-embedding. Let $t \le k = \lfloor \log_q(|C|) \rfloor $. Then
$$
A_q(n,d) \le \frac{q^t}{|B(r,t)|} \left(A_q(n-t,d-2r) - \frac{|B(r,n-t)|}{|B(d-2r-1,n-t)|} + 1 \right)
$$
\end{theorem}
\begin{proof}
We consider an $(n,d)$ code $C$ such that $|C| = A_q(n,d)$ and $C$ is systematic-embedding. By hypothesis $C$ must exist.
We number all words in $C$ in any order: $C=\{ c_i \mid 1\leq i\leq A_q(n,d)\}$. \\ We indicate the $i$-th word with $c_i = (c_{i,1},\dots,c_{i,n})$.
We puncture $C$ as follows:
\begin{enumerate}
\item[(i)] we choose any $t$ columns among the $k$ columns of the systematic part of $C$, $1\leq j_1,\dots,j_t\leq n$; since two codes are equivalent w.r.t. column permutations we suppose $j_1=1,\dots,j_t=t$.\\
Let us split each word $c_i \in C$ in two parts
\begin{align*}
\tilde{c_i} = (c_{i,1},\dots,c_{i,t}) \quad \bar{c_i} = (c_{i,t+1},\dots,c_{i,n}),\qquad \mbox{ so} \quad c_i = (\tilde{c_i},\bar{c_i}).
\end{align*}
\item[(ii)] We choose a $z\in \FF^t$.
\item[(iii)] We collect in $I$ all $i$'s s.t. $d(z,\tilde{c_i}) \le r$;
\item[(iv)] We delete the first $t$ components of $\{c_i \mid i \in I\}$.
\end{enumerate}
Then the punctured code $\bar{C}_z$ obtained by (i),(ii),(iii) and (iv) is:
\begin{align*}
\bar{C}_z = \{\bar{c_i} \mid i\in I\}=\{\bar{c_i} \mid 1\leq i\leq A_q(n,d), d(z,\tilde{c_i}) \le r\}
\end{align*}
We claim that we can choose $z$ in such a way that $\bar{C}_z$ is equivalent to a code with the following properties:
\begin{align}
\label{eqLeng} & \bar{n} = n-t \\
\label{eqDist} & \bar{d} \ge d-2r \\
\label{eqCard} & |\bar{C}_z| \ge \frac{|C|}{q^t}|B(r,t)| \\
\label{eqWeig} & \mathrm{w}(\bar{c_i}) \ge d-r \text{ for all } \bar{c_i} \ne 0
\end{align}
(\ref{eqLeng}) is obvious.
As regards (\ref{eqDist}), note that $\d(c_i,c_j) = \d(\tilde{c_i},\tilde{c_j}) + \d(\bar{c_i},\bar{c_j}) \ge d$ and also that $\tilde{c_i},\tilde{c_j}\in B_z(r,t)$ implies $\d(\tilde{c_i},\tilde{c_j}) \le 2r$.
Therefore for any $i\ne j$
$$
2r + \d(\bar{c_i},\bar{c_j}) \ge \d(\tilde{c_i},\tilde{c_j}) + \d(\bar{c_i},\bar{c_j}) \ge d \,.
$$
The proof of (\ref{eqCard}) is more involved and we need to consider the average number $M$ of the $i$'s such that $\tilde{c_i}$ happens to be in a sphere of radius $r$ (in $\FF^{t}$).
The average is taken over all sphere centers, that is, all vectors $x$'s in $\FF^t$, so that
$$
M= \frac{1}{|\FF^t|} \sum_{x\in \FF^t} |\{i \mid 1\leq i\leq A_q(n,d), \tilde{c_i} \in B_x(r,t)\}| \,.
$$
Let us define a function:
$$
\psi: \FF^t \times \FF^t \longrightarrow \{0,1\}, \qquad
\psi(x,y) =
\bigg \{
\begin{array}{rl}
1, & \d(x,y) \le r \\
0, & \text{otherwise} \\
\end{array}.
$$
Then we can write $M$ and $|B_y(r,t)|$ (for any $y\in \FF^t$) as
$$
M = \frac{1}{q^t} \sum_{x \in \FF^{t}} \sum_{i=1}^{A_q(n,d)} \psi(x,\tilde{c_i}) \qquad
|B_y(r,t)|= \sum_{x \in \FF^t} \psi(x,y) \,.
$$
By swapping variables we get
\begin{align*}
M = \frac{1}{q^t} \sum_{x \in \FF^{t}} \sum_{i=1}^{A_q(n,d)} \psi(x,\tilde{c_i})
= \frac{1}{q^t} \sum_{i=1}^{A_q(n,d)} \sum_{x \in \FF^t} \psi(x,\tilde{c_i})
= \frac{A_q(n,d)}{q^t} |B(r,t)| \,.
\end{align*}
This means that there exists $\hat{x} \in \FF^t$ such that
$$
|\{i \mid 1\leq i\leq A_q(n,d), \tilde{c_i} \in B_{\hat x}(r,t)\}| \geq M \geq \frac{A_q(n,d)}{q^t} |B(r,t)| \,.
$$
In other words, there are at least $\frac{|C|}{q^t} |B(r,t)|$ $c_i$'s such that their $\tilde{c_i}$'s are contained in $B_{\hat x}(r,t)$.
Distinct $c_i$'s may well give rise to the same $\tilde{c_i}$'s, but they always correspond to distinct $\bar{c_i}$'s (see the proof of (\ref{eqDist})),
so there are at least $\frac{|C|}{q^t} |B(r,t)|$ (distinct) $\bar{c_i}$'s such that their corresponding $\tilde{c_i}$'s
fall in $B_{\hat x}(r,t)$. By choosing $z=\hat{x}$ we then have at least $\frac{|C|}{q^t} |B(r,t)|$ (distinct) codewords of $\bar{C}_z$ and so (\ref{eqCard}) follows.
We claim that (\ref{eqWeig}) holds if $0\in C$ and $z = 0$. In fact:
\begin{align*}
& \mathrm{w}(c) = \d(0,c) \geq d, \,\quad \forall c \in C \mbox{ such that } c\neq 0. \\
& z = 0 \quad \implies \qquad y \in B_z(r,t) \iff \mathrm{w}(y) \le r.
\end{align*}
As a consequence, any nonzero word $c_i = (\tilde{c_i},\bar{c_i})$ of weight at most $r$ in $\tilde{c_i}$ has weight at least $d-r$ in the other $n-t$ components.\\
If $0 \notin C$ or $z \ne 0$ we consider a code $C+v$ equivalent to $C$, by choosing the translation $v$ in the following way. By hypothesis of systematic-embedding there exists $\hat{c} \in C$ such that its first $t$ coordinates form the vector $\hat{x}$. By considering $v = \hat{c}$ we obtain the desired code, thus (\ref{eqWeig}) is proved.\\
Now we call $X$ the largest $(\bar{n},d-2r)$-code containing the zero word and such that $\mathrm{w}(\bar{x}) \ge d-r = (d-2r) +r$, $\forall \bar{x} \in X$. Observe that $X$ satisfies (\ref{eqLeng}), (\ref{eqDist}), (\ref{eqCard}), (\ref{eqWeig}) and so $|X| \ge |\bar{C_z}|$. Then we can apply Theorem \ref{thmEPS} to $X \setminus \{0\}$ and $\epsilon = r$, and obtain the following chain of inequalities:
\begin{align*}
\frac{|C|}{q^t} |B(r,t)| \le |\bar{C_z}| \le |X| \le A_q(\bar{n},d-2r) - \frac{|B(r,\bar{n})|}{|B(d-2r-1,\bar{n})|} + 1
\end{align*}
and since $|C| = A_q(n,d)$ we have the bound:
\begin{align*}
A_q(n,d) \le \frac{q^t}{|B(r,t)|}\left(A_q(\bar{n},d-2r) - \frac{|B(r,\bar{n})|}{|B(d-2r-1,\bar{n})|} + 1\right).
\end{align*}
\end{proof}
\subsection{Systematic case}
When we restrict ourselves to the systematic/linear case, the value $A_q(n,d)$ can only be a power of $q$, and if the dimension of the code $C$ is $k$, then $A_q(n,d) = q^k$. By choosing $t=k$ we have the following corollary:
\begin{corollary}[Bound ${\mathcal B}$]
\label{boundB}
Let $k,d,r \in \NN, d \ge 2,k \ge 1$. Let $n$ be such that there exists an $(n,k,q)$ systematic code $C$ with distance at least $d$.\\
If $0 \le r \le \min\{ \lfloor \frac{d-1}{2} \rfloor,k\} $, then
$$ |B(r,k)| \le A_q(n-k,d-2r) - \frac{|B(r,n-k)|}{|B(d-2r-1,n-k)|} + 1.$$
\end{corollary}
In the systematic/linear case the Litsyn-Laihonen bound becomes:
$$|B(r,k)| \le A_q(n-k,d-2r).$$
Easy computations can be done in the case $d=3$, since in this case $r$ can be at most $1$, so that:
\begin{itemize}
\item $|B(1,k)| = (q-1)k + 1$
\item $A_q(n-k,d-2r) = A_q(n-k,1) = q^{n-k}$
\item $|B(1,n - k)| = (q - 1)(n - k) + 1$
\item $|B(d - 2r - 1, n - k)| = |B(0, n - k)| = 1$
\end{itemize}
Our bound then reduces to:
$$0 \le q^{n-k} - (q - 1)n - 1$$
which is stronger than the Litsyn-Laihonen bound, which in the case $d=3$ reduces to:
$$0 \le q^{n-k} - (q - 1)k - 1.$$
\section{Experimental comparisons with other upper bounds, remarks and conclusion}
We have analyzed the case of linear codes, implementing Bound ${\mathcal B}$. The algorithm to compute the bound takes as inputs $n,d$, and returns the largest $k$ (checks are done until $k=n-d+1$) such that the inequality of the bound holds. If the inequality always holds in this range, $n-d+1$ is returned. Then we compared our upper bound on $k$ with other bounds, restricting those which hold in the general non-linear case to the systematic case. In particular they give a bound on $A_q(n,d)$ instead of a bound on $k$. As a consequence, for example, if the Johnson bound returns the value $A_q(n,d)$ for a certain pair $(n,d)$, then we compare our bound with the value $\lfloor \log_q(A_q(n,d)) \rfloor$, which is the largest power $s$ of $q$ such that $q^s \le A_q(n,d)$.\\
The inequality in Theorem \ref{boundB} involves the value $A_q(n-k,d-2r)$, which is the maximum number of words that we can have in a \emph{non-linear} code of length $n-k$ and distance $d-2r$. To implement Bound ${\mathcal B}$ it is necessary to compute $A_q(n-k,d-2r)$; when this value is unknown (we use known values only in the binary case for $n = 3,\dots,28 , d = 3,\dots,16$), we return instead an upper bound on it, choosing the best between the Hamming (Sphere Packing), Singleton, Johnson, and Elias bound (the Plotkin bound is used when possible). Even though it is a very strong bound, we do not use the Levenshtein bound because it is very slow as $n$ grows. This means that if better values of $A_q(n-k,d-2r)$ can be found, then Bound ${\mathcal B}$ could return even tighter results.\\
Table \ref{tabStat1} and \ref{tabStat2} show a comparison between all bounds' performances, except for Plotkin's, due to its restricted range. For each bound and for each $q=2,3,4,5,7,8,9,11,13,16,17,19,23,25,27,29$ we have computed, in the range $n=3,\dots,100$ and $d=3,\dots,n-1$, the percentage of cases the bound is the ``best'' known bound between Bound ${\mathcal B}$, the Griesmer, Johnson, Levenshtein, Elias, Hamming and Singleton bound. Both wins and draws are counted in the percentage, since more than one bound may reach the best known bound, and in this case we increased the percentage of each best bound. \\
For each $q$ the most performing bound is in bold. Up to $q=7$ the Levenshtein bound is the most performing. From $9 \le q \le 29$ we have that Bound ${\mathcal B}$ is the most performing bound, and in particular, in the case $q=29$, it is the best known bound almost $91\%$ of the times. \\
Table \ref{tabBnewbounds}, instead, shows some cases (one for each $q=7,\dots,29$) where Bound ${\mathcal B}$ beats all other known bounds. This happens from $q=7$, for the range of $n$ considered. The letters B, J, H, G, E, S and L stand respectively for Bound ${\mathcal B}$, Johnson, Hamming (Sphere Packing), Griesmer, Elias, Singleton, and Levenshtein bound. It can be seen that there are some cases where Bound ${\mathcal B}$ is tight, as for the parameters $(9,17,7)$, for which there exists a code with distance $10$.\\
Tables \ref{tabB3-100}, and \ref{tabB3-100bis} give emphasis to the number of times Bound ${\mathcal B}$ improves the best known bound (thus the cases where it beats all other bounds). In the considered range Bound ${\mathcal B}$ starts to beat all other bounds from $q=7$. \\
The third row of Tables \ref{tabB3-100} and \ref{tabB3-100bis} shows how many times (percentage over the number of draws and wins) the value $\delta = \frac{|B(r,n-k)|}{|B(d-2r-1,n-k)|}$ is different from zero. Informally, we can view $\delta$ as the probability to randomly pick up a word of weight less than $r$ from a ball of radius $d-2r-1$. We can notice that this percentage is very high, which means that a weaker version of Bound ${\mathcal B}$, which is similar to the Litsyn-Laihonen bound for systematic codes, could be used, by simply searching the largest $k$ satisfying:
$$ |B(r,k)| \le A_q(n-k,d-2r) + 1$$
It is curious to notice that in all the wins we have $\delta=0$, and that $\delta=0$ also $38094$ times over the $46967$ ties and wins. This means that the weaker version of Bound ${\mathcal B}$ is sufficient to obtain most of the wins and ties in the investigated cases.\\
We note that in general, if $r$ is greater than $d$, we expect $\delta$ to be big, decreasing very quickly as $r$ increases, holding $d$ fixed; this happens since $|B(x,k)|$ decreases following a Gaussian distribution (roughly approximating $|B(x,k)|$ with a factorial), and so any time we subtract $2r$ the decrease is doubled.\\
The fourth row of Tables \ref{tabB3-100} and \ref{tabB3-100bis} shows the ratio between the number of times the Plotkin bound has been used to bound $A_q(n-k,d-2r)$ and the number of draws and wins. Third and fourth row show values which are close for $q$ small and gets further as $q$ grows. This happens because almost all the times that the weaker version (with $\delta = 0$) of Bound ${\mathcal B}$ ties with the best known bound, a strong bound on $A_q(n-k,d-2r)$ must be used, and the strongest bound is Plotkin's, which though has a smaller range of applicability as $q$ grows.\\
We report in the fifth row of Tables \ref{tabB3-100} and \ref{tabB3-100bis} the fact that the maximum ratio $d/n$ reached in the wins of Bound ${\mathcal B}$ grows up to the value $0.64$ and then seems to get stabilized toward $0.5$. This means that Bound ${\mathcal B}$ is a very strong bound for distances which are no more than $\frac{2}{3}$ of the length $n$ for small values of $q$, and no more than half of the length $n$ for bigger values of $q$.\\
Comparisons have been made using inner MAGMA (\cite{CGC-MAGMA}) implementations of known upper bounds, except for the Johnson bound. For this bound we noted that the inner MAGMA implementation could be improved and so we used our own MAGMA implementation for this bound.
\section*{Acknowledgements}
The first two authors would like to thank the third author (their supervisor). Partial results appear in \cite{CGC-cd-phdthesis-ele}. The authors would like to thank: Ludo Tolhuizen (Philips Group Innovation, Research), the MAGMA group and in particular John Cannon.
|
{
"timestamp": "2013-01-01T02:01:42",
"yymm": "1206",
"arxiv_id": "1206.6006",
"language": "en",
"url": "https://arxiv.org/abs/1206.6006"
}
|
\section{Introduction}\label{sec:intro}
Mathematical models are often employed to predict the behavior and evolution of complex physical, chemical, biological and economic phenomena.
Often more realistic mathematical models can be obtained by allowing for some randomness \cite{Oksendal}. In a dynamical system, for example, this randomness can be introduced by adding a noisy driving term, where a noise $x_t$ drives the evolution of the state $y_t$ of the dynamical system [Fig. \ref{fig1}(a)]. Similar models have been employed to describe a wide range of phenomena, from thermal fluctuations of microscopic objects \cite{Nelson} and the evolution of stock prices \cite{Bachelier1900} to heterogeneous response of biological systems to stimuli \cite{Blake2003} and stochasticity in gene expression \cite{Kaern2005}.
Intrinsically noisy phenomena are often modeled using stochastic differential equations (SDEs) \cite{Oksendal}. An SDE is obtained by adding some randomness to a deterministic dynamical system described by an ordinary differential equation (ODE) \cite{Strogatz}. A typical SDE has the form
\begin{equation}\label{eq:SDE}
d y_t = G(y_t) \, dt + \sigma \, dW_t,
\end{equation}
where $G(y)$ is a function representing the deterministic response of the system, $W_t$ is a Wiener process representing the stochastic driving, and $\sigma$ is a scaling constant representing the intensity of the noise. Clearly, the term $\sigma\,dW_t$ is a mathematical model of the physical noise $x_t\,dt$. In particular, any real process has always a correlation time $\tau>0$, while $dW_t$ is strictly uncorrelated, i.e. $\tau=0$; therefore, the smaller the $\tau$ of a real process, the better it is approximated by $dW_t$ \cite{Kloeden}. We remark that Eq. (\ref{eq:SDE}) has a unique solution with a given initial condition $y_0$. This solution satisfies the integral equation $y_t = y_0 + \int_0^tG(y_s)ds + \sigma W_t$ \cite{Oksendal}.
In many real phenomena, the system's state further influences the driving noise intensity [Fig. \ref{fig1}(b)]; for example, the volatility of a stock price may be altered by its actual value \cite{Hamao1990} or gene expression may be regulated by the concentration of its products \cite{Kaern2005}. This multiplicative feedback $F(y)$ leads us to consider an SDE with multiplicative noise:
\begin{equation}\label{eq:SDEmult}
d y_t = G(y_t)\,dt + \sigma\,F(y_t)\,dW_t.
\end{equation}
Unlike Eq. (\ref{eq:SDE}), the integration of Eq. (\ref{eq:SDEmult}) presents some difficulties because $W_t$ is a function of unbounded variation \cite{Oksendal}. The {\it stochastic integral} $\int_{0}^{T} f(y_t) \circ_\alpha dW_t \equiv \lim_{N \to \infty} \sum_{n=0}^{N-1} f(y_{t_n})\Delta W_{t_n}$, where $t_n = \frac{n+\alpha}{N} T$ and $\alpha \in [0,1]$, leads to different values for each choice of $\alpha$ \cite{Karatzas, Sussmann1978}. Common choices are: the {\it It\={o} integral} with $\alpha=0$ \cite{Ito1944}; the {\it Stratonovich integral} with $\alpha=0.5$ \cite{Stratonovich1966}; and the {\it anti-It\={o}} or {\it isothermal integral} with $\alpha=1$ \cite{Klimontovich1990}. Alternative values of $\alpha$ may entail dramatic consequences; for example, a Malthusian population growth model with a noisy growth rate can lead either to extinction, if solved with $\alpha = 0$, or to exponential growth, if solved with $\alpha = 0.5$ \cite[Section 5.1]{Oksendal}. Therefore, a complete model is defined by the SDEs \emph{and} the associated convention, which must be determined on the basis of the available experimental data \cite{vanKampen1981}. Various preferences regarding the appropriate choice of $\alpha$ have emerged in various fields in which SDEs have been fruitfully applied. For example, $\alpha = 0$ is typically employed in economics \cite{Oksendal} and biology \cite{Turelli1977}, because of its property of ``not looking into the future,'' referring to the fact that, when the integral is approximated by a sum, the first point of each interval is used. $\alpha = 0.5$ naturally emerges in real systems with non-white noise, i.e. $\tau > 0$, e.g. the SDEs describing electrical circuits driven by a multiplicative noise \cite{Smythe1983}, as a consequence of the Wong-Zakai theorem, which states that, if the Wiener process is substituted by a smooth process with $\tau \rightarrow 0$, the resulting SDE obeys the Stratonovich calculus \cite{Wong1969}. Finally, $\alpha = 1$ naturally emerges in physical systems in equilibrium with a heat bath \cite{Ermak1978,Lancon2001,Volpe2010}. Other values of $\alpha$ have also been theoretically proposed \cite{Kupferman2004, Freidlin2004,Hottovy2012}. Clearly, from the modelling perspective the choice of the appropriate SDEs-convention pair is of critical importance, especially when the model is subsequently employed to predict the system's behavior under new conditions.
In this article, we experimentally demonstrate that the convention for a given physical system can actually vary under changing operational conditions. We show that the equation describing the behavior of an electric circuit with multiplicative noise, which usually obeys the Stratonovich convention ($\alpha = 0.5$) \cite{Smythe1983}, crosses over to obey the It\^o convention ($\alpha = 0$), as certain parameters of the dynamical system are changed. This transition is continuous, going through all intermediate values of $\alpha$, and we relate it by an explicit formula to the ratio between $\tau$ and the feedback delay time $\delta$, which is always present in any real system. Similar transitions have the potential of dramatically altering a system's long term behavior and, therefore, we argue their possibility should be taken into account in the modelling of systems with SDEs, which are widely employed in economics, biology and physics.
\section{System without feedback}
A system near an equilibrium is often described as a harmonic oscillator, where a {\it restoring force} brings the system back towards the equilibrium. Such harmonic oscillators are widely employed to describe the behavior of systems near their equilibria, from the swinging of pendula to the vibrations of atoms in crystals. In this work, as a paradigmatic experimental realization of an overdamped harmonic oscillator, we consider an RC electric circuit with resistance $R = 1\,\mathrm{k\Omega}$ and capacitance $C = 100\,\mathrm{nF}$; $x_t$ is the driving voltage (applied on the series RC) and $y_t$ the output voltage (measured on C) [Fig. \ref{fig2}(a)]. In order to approximate a Wiener process, we will always use a driving noise with a correlation time much shorter than the typical relaxation time of the circuit, i.e. $\tau \ll RC = 100\,\mathrm{\mu s}$. A detailed description of the circuit is given in the methods section. The output
voltage $y$ experiences an elastic restoring force with elastic constant $k = 1/RC$ towards the $y = 0$ equilibrium state.
In order to understand qualitatively the behavior of our system, in Fig. \ref{fig2}(b) we consider the evolution of $y_t$ for a given initial condition $y_0$ and $\tau = 1.1\,\mathrm{\mu s}$. The dashed line illustrates a sample trajectory for $y_0 = -250\,\mathrm{mV}$: at the beginning $y_t$ decays towards the equilibrium $y = 0\,\mathrm{mV}$ and, afterwards, oscillates around the equilibrium, clearly demonstrating its stochastic nature. Averaging several such trajectories, we obtain solid lines corresponding to different $y_0$, which clearly show that the average trajectory moves towards the equilibrium regardless of $y_0$.
The relevant SDE is Eq. (1) with $G(y) = -k y$ and $\sigma$ proportional to the intensity of the noise, i.e.:
\begin{equation}\label{eq:SDEcircuit}
dy_t = -k\,y_t\,dt + \sigma\,dW_t,
\end{equation}
where we remark that, since $\sigma$ is constant, the choice of $\alpha$ does not affect the solution and, therefore, the convention can be left undetermined.
In a very general sense, a system described by an SDE can be characterized by its stochastic diffusion $S(y)$ and its drift $D(y)$ \cite{vanKampen1981}. Letting the system evolve from an initial state $y$ for an infinitesimal time-step, $S(y)$ is proportional to the variance of the system's state change [inset in Fig. \ref{fig2}(c)] and $D(y)$ to its average [inset in Fig. \ref{fig2}(d)]. $S(y)$ and $D(y)$ can be obtained from an experimental discrete time-series $\{y_0, ... , y_{N-1}\}$ sampling the output signal at intervals $\Delta t$ as
\begin{equation}\label{eq:D2}
S(y) = \frac{1}{2 \Delta t} \left< (y_{n+1} - y_n)^2 \mid y_n \cong y \right>
\end{equation}
and
\begin{equation}\label{eq:D1}
D(y) = \frac{1}{\Delta t} \left< y_{n+1} - y_n \mid y_n \cong y \right>.
\end{equation}
Eqs. (\ref{eq:D2}) and (\ref{eq:D1}) are strictly true in the limit $\Delta t \rightarrow 0$; in experiments $\Delta t$ should be much smaller than the relaxation time of the system \cite{Brettschneider2011} and, in the presence of colored noise, should also meet the condition $\Delta t \gg \tau$.
The symbols in Fig. \ref{fig2}(c) represent the experimental values of $S(y)$ for various $\sigma$ and $\tau$; they clearly show that, for the system described by Eq. (\ref{eq:SDEcircuit}), $S(y)$ is a constant that depends only on the intensity of the input noise $\sigma$, i.e. $S(y) = \frac{1}{2}\sigma^2$, and not on $\tau$. Fig. \ref{fig2}(d) shows the deterministic response $G(y)$ [solid line] and the experimental values of $D(y)$ [symbols]; the values of $D(y)$ lie on $G(y)$ independently of $\sigma$ and $\tau$. We note that the absence of dependence on $\tau$ for both $S(y)$ and $D(y)$ demonstrates that a white noise is a good model for the colored driving noise used in our experiments, i.e. with $\tau \leq 1.1\,\mathrm{\mu s}$.
\section{System with feedback}
Now we introduce a multiplicative feedback in the circuit as shown in Fig. \ref{fig3}(a).
This is achieved by multiplying the input noise by $F(y)$. As shown in Fig. \ref{fig3}(b), $F(y)$ increases linearly between $-80\,\mathrm{mV}$ and $160\,\mathrm{mV}$ and saturates to $0.2\,\mathrm{V}$ ($1\,\mathrm{V}$) for $y <-80\,\mathrm{mV}$ ($y >160\,\mathrm{mV}$). The details of the circuit with multiplicative feedback are given in the methods section. The relevant SDE is:
\begin{equation}\label{eq:SDEmultcircuit}
dy_t = -k\,y_t\,dt + \sigma\,F(y_t)\,dW_t,
\end{equation}
which now requires an explicit specification of $\alpha$ in order to be well-defined. For the case of an electric circuit driven by a colored noise the Stratonovich convention holds, as is expected theoretically from the Wong-Zakai theorem \cite{Wong1969} and has been shown experimentally \cite{Smythe1983}. In fact, we see that the Stratonovich integral also describes the system in our case.
When $\tau = 1.1\,\mathrm{\mu s}$, differently from the case without feedback [Fig. \ref{fig2}(b)], the average trajectories in Fig. \ref{fig3}(c) do not converge to $y=0$, but to $y = 50\,\mathrm{mV}$. This shift of the equilibrium is a consequence of the non-uniformity of $S(y)$ [Fig. \ref{fig3}(d)] due to the presence of a multiplicative feedback.
$D(y)$ [symbols in Fig. \ref{fig3}(e)] is also altered as a consequence of the multiplicative noise. In particular, $D(y)$ is now different from $G(y)$ [solid line in Fig. \ref{fig3}(e)]. The difference between the two is a noise-induced extra-drift
\begin{equation}\label{eq:spuriousdrift}
\Delta D(y) = D(y)-G(y),
\end{equation}
which is represented by the symbols in Fig. \ref{fig3}(f). We remark that $S(y)$ is independent from the interpretation of the underlying SDE \cite{Brettschneider2011}.
The relation between $\Delta D(y)$ and the variation of $S(y)$, i.e. $S'(y) = \frac{\partial S(y)}{\partial y}$, becomes evident considering the good agreement between $\Delta D(y)$ and $0.5 S'(y)$ [dashed line in Fig. \ref{fig3}(f)]. The prefactor $0.5$ corresponds to the $\alpha$ of the Stratonovich interpretation of the SDE (\ref{eq:SDEmultcircuit}), which permits us to make sense of the experimentally observed data.
We can therefore define
\begin{equation}\label{eq:alpha}
\alpha(y) = \frac{\Delta D(y)}{S'(y)},
\end{equation}
which in general may depend on the system under study \cite{vanKampen1981,Ao2007}.
\section{Dependence of $\alpha$ on $\tau/\delta$}
We now proceed to decrease $\tau$. Some samples of $x_t$ are shown in Figs. \ref{fig4}(a)-(c): the oscillations become faster and wider as $\tau$ decreases ($\tau = 0.6$, $0.2$ and $0.1\,\mathrm{\mu s}$ for Figs. \ref{fig4}(a), (b) and (c), respectively). We remark that the shorter the $\tau$, the more closely the conditions for the applicability of the Wong-Zakai theorem \cite{Wong1969} are met. One might expect that the circuit equation will follow the Stratonovich equation even more closely and, thus, we shall expect no change with respect to the situation illustrated in Fig. \ref{fig3}. However, as we can see in Figs. \ref{fig4}(d)-(f), as $\tau$ decreases, the equilibrium position of the system moves back towards $y=0$.
Clearly, this behavior does not depend on the feedback; in fact, $F(y)$ is the same in all the cases, as evidenced by the fact that the experimental values of $S(y)$ do not vary significantly [symbols in Fig. \ref{fig4}(g)]. Instead, it depends on the fact that, as $\tau$ decreases, $D(y)$ [symbols in Fig. \ref{fig4}(h)] tends to $G(y)$ or, equivalently, $\Delta D(y)$ [symbols in Fig. \ref{fig4}(i)] tends to $0$. Using Eq. (\ref{eq:alpha}) and $S'(y)$ [dashed line in Fig. \ref{fig4}(i)], it is possible to calculate $\alpha$, which goes from $0.5$ to $0$ as $\tau$ decreases. Thus, the SDE (\ref{eq:SDEmultcircuit}) shifts from obeying the Stratonovich calculus ($\alpha = 0.5$ for $\tau = 1.1\,\mathrm{\mu s}$) to obeying the It\^o calculus ($\alpha = 0$ for $\tau = 0.1\,\mathrm{\mu s}$). As we have remarked in the introduction, such a Stratonovich-to-It\^o transition can have dramatic effects on the long time dynamics of the system, for example, altering the system's equilibria as shown in
Figs. \ref{fig4}(d)-(f).
The reason for this Stratonovich-to-It\^o transition lies in the underlying dynamics of the system modeled by the SDE (\ref{eq:SDEmultcircuit}). For most real physical, chemical, biological and economic phenomena such microscopic dynamics are either too complex to be modeled or simply experimentally inaccessible. This justifies the need to resort to effective models, e.g. SDEs. For this work, however, we have chosen a model system, i.e. an electric circuit, that gives us complete access to the underlying dynamics. We are, therefore, able to track down the observed Stratonovich-to-It\^o transition to the fact that the feedback is not instantaneous, but entails a delay. We measured the feedback delay in the circuit in Fig. \ref{fig3}(a) to be $\delta = 0.4\,\mathrm{\mu s}$ (see methods section). The dots in Fig. \ref{fig5} represent $\alpha$ as a function of $\delta/\tau$. The transition occurs as $\tau$ becomes similar to $\delta$, i.e. $\delta/\tau \approx 1$. This can be qualitatively explained considering that, if $\delta = 0$, there is a correlation between the sign of $x$ and the time-derivative of $F(y)$, which is the underlying reason why the process converges to the Stratonovich solution \cite{Wong1969}; however, if $\delta \gg \tau$, this correlation disappears effectively randomizing the time-derivative of $F(y)$ with respect to the sign of $x$ and leading to a situation where the system loses its memory.
In order to gain a more precise mathematical understanding of this Stratonovich-to-It\^o transition, we consider the following family of delayed ODEs
\begin{equation}\label{eq:approx}
dy_t = -k\,y_t\,dt + \sigma\,F(y_{t-\delta})\,x_t^{\tau}dt,
\end{equation}
where $x_t^{\tau}$ is a sufficiently regular noise with correlation time $\tau$ and the feedback is delayed by $\delta$. Studying the limits where $\delta,\,\tau \rightarrow 0$ under the condition $\delta/\tau \equiv $ constant, we recover the SDE (\ref{eq:SDEmultcircuit}) with
\begin{equation}\label{eq:mathe_alpha}
\alpha\left(\frac{\delta}{\tau}\right) = \frac{0.5}{1+\frac{\delta}{\tau}}.
\end{equation}
The details of this derivation are given in the methods section. Fig. \ref{fig5} shows the agreement between Eq. (\ref{eq:mathe_alpha}) [grey line] and the experimental data as a function of $\tau$ with fixed $\delta = 0.4\,\mathrm{\mu s}$ [dots].
In order to verify the dependence of $\alpha$ on the ratio $\delta/\tau$, we performed some additional experiments keeping $\tau = 0.4\,\mathrm{\mu s}$ fixed and varying $\delta$. For this purpose, we added a delay line in the feedback branch of the circuit so that we could adjust $\delta = 0.9$ to $5.4\,\mathrm{\mu s}$ (see methods section). The resulting values of $\alpha$ are plotted in Fig. \ref{fig5} as squares and are in good agreement with the theoretical prediction given by Eq. (\ref{eq:mathe_alpha}).
\section{Conclusion}
Our results show that the intrinsic ambiguity in the models of physical, biological and economical phenomena using SDE with multiplicative noise can have concrete consequences. In particular, even if an SDE with a specified convention is given, such convention can vary as a function of the hidden underlying dynamics of the system and therefore as a function of the position on the parameter space where the system is operated. Notably, our result that a Stratonovich-to-It\^o transition occurs if the delay in the feedback ($\delta$) is longer than the correlation time of the noise ($\tau$) has general applicability since instantaneous feedback and white noise are only mathematical approximations. The possibility of such a shift and of its dramatic consequences should be recognized and accounted for in many cases where SDEs with multiplicative noise are routinely employed to predict the behavior and evolution of complex physical, chemical, biological and economic phenomena.
\begin{appendix}
\section{RC Circuit}
The dynamical system employed in our experiments is an RC-electric circuit. A noisy signal $x_t$, which is generated by a function wave generator (Agilent 33250A) and pre-filtered by a low-pass filter to set the desired $\tau$, drives the RC series. The system's state $y_t$ is measured on the capacitor using a digital oscilloscope (Tektronix 5034B, $350\,\mathrm{MHz}$ bandwidth) at $10^6\,\mathrm{sample/s}$. For the circuit with feedback, a high-speed low-noise analog multiplier (AD835) is employed to multiply $x_t$ by the feedback signal (generated by amplifying $y_t$ and adding an offset) before applying it to the RC series. We measured the intrinsic delay of the circuit feedback branch (due to its finite bandwidth) applying a periodic deterministic signal and measuring the delay of the response. The additional delay line was realized by employing an analog variable delay amplifier (Ortec 427A).
\section{Derivation of Eq. (\ref{eq:mathe_alpha})}
We study the solution of Eq. (\ref{eq:approx}) taking the limit $\tau, \delta \rightarrow 0$ while keeping $\delta/\tau \equiv$ constant. In order to deal with a sufficiently regular process, we take $x_t^{\tau}$ as a harmonic process \cite{Schimansky-Geier1990}, i.e. the stationary solution of the SDE
\begin{equation}\label{eq:harmonicnoise}
\left\{
\begin{array}{ccl}
dx_t^{\tau} & = & \frac{1}{\tau} z_t dt \\
dz_t & = & - \frac{\Gamma}{\tau} z_t dt - \frac{\Omega^2}{\tau} x_t^{\tau} dt + \frac{\sqrt{2 \gamma} \Omega^2}{\sqrt{\tau}} dW_t
\end{array}
\right.
\end{equation}
where $\Gamma$, $\Omega$ and $\gamma$ are constants, $W_t$ is a Wiener process, and $\tau$ is the correlation time for the Ornstein-Uhlenbeck process obtained taking the limit $\Gamma,\, \Omega ^2 \rightarrow \infty$ while keeping $\frac{\Gamma}{\Omega ^2} = 1$. As $\tau \rightarrow 0$, the rescaled solution of Eq. (\ref{eq:harmonicnoise}) $\frac{x_t^{\tau}}{\sqrt{\tau}}$ converges to a white noise.\\
In Eq. (\ref{eq:approx}), we make the time substitution $u = t-\delta$ and then write the equation in terms of the Wiener process $V_u$ defined as $V_u = W_{u + \delta} - W_{\delta}$. Next, we expand about $u$ to first order and rewrite the resulting equation as a first order system in $y$, $v$, $x$, and $z$, where $v = \sqrt{\frac{\delta}{\tau}}\sqrt{\tau}\frac{dy}{du}$. We then consider the backward Kolmogorov equation associated with the resulting SDE, which gives the equation for the transition density $\rho(u, y, v, x, z, y', v', x', z', u')$. We can expand $\rho$ in powers of the parameter $\sqrt{\tau}$, i.e. $\rho = \rho_0 + \sqrt{\tau} \rho_1 + \tau \rho_2 + ...$. We use the standard homogenization method \cite{Pavliotis} to derive the backward Kolmogorov equation for $\rho_0$ \cite{Risken}, i.e. the equation for the limiting transition density $\rho_0$ as $\tau, \delta \rightarrow 0$ with $\delta/\tau \equiv$ constant. Finally, we take the limit $\Gamma,\,\Omega^2 \rightarrow \infty$ while keeping the ratio $\frac{\Gamma}{\Omega^2} = 1$. The resulting backward Kolmogorov equation is
\begin{equation}\label{eq:Kolgomorov}
\frac{\partial \rho _0}{\partial u} =
\left[
- k y
+
\frac{0.5}{1+\frac{\delta}{\tau}}
\sigma^2 F(y) \frac{dF(y)}{dy}
\right]
\frac{\partial \rho _0}{\partial y}
+
\frac{\sigma^2 F^2(y)}{2}
\frac{\partial ^2 \rho _0}{\partial y ^2},
\end{equation}
and the associated (It\^o) SDE is
\begin{equation}\label{eq:SDEKolgomorov}
dy_t = - k y_t dt
+
\frac{0.5}{1+\frac{\delta}{\tau}}
\sigma^2 F(y) \frac{dF(y)}{dy}
dt
+
\sigma F(y) dW_t.
\end{equation}
The equation for $\alpha$ [Eq. (\ref{eq:mathe_alpha})] follows straightforwardly by comparison of Eq. (\ref{eq:SDEKolgomorov}) and Eq. (\ref{eq:SDEmultcircuit}).
\begin{acknowledgments}
The authors would like to thank Clemens Bechinger, Laurent Helden, Ao Ping, Riccardo Mannella and Antonio Sasso for inspiring discussions, Sergio Ciliberto and Antonio Coniglio for critical reading of the manuscript, and Alfonso Boiano for help in the realization of the circuit.
\end{acknowledgments}
\bibliographystyle{unsrt}
|
{
"timestamp": "2012-06-28T02:03:53",
"yymm": "1206",
"arxiv_id": "1206.6271",
"language": "en",
"url": "https://arxiv.org/abs/1206.6271"
}
|
\section{Introduction}
Observations over the last two decades have revealed central
massive black holes in all sufficiently well-observed massive
galaxies (e.g., \citealt{2011ApJ...738...17G}).
However, the case is not as clear for lower-mass
galaxies or globular clusters, and indeed although there is
evidence for black holes in some low-mass galaxies
\citep{2010ApJ...721...26G,2011ApJ...727...20K} there
are examples of galaxies that clearly do not have black holes
that follow the standard mass -- velocity dispersion ($M-\sigma$)
relation \citep{2001Sci...293.1116M,2001AJ....122.2469G}
and the case for globular clusters is far from
clear (e.g., \citealt{2002AJ....124.3270G, 2003ApJ...595..187M,
2003ApJ...582L..21B,2012ApJ...750L..27S}).
Here we approach this question by focusing on the velocity
dispersion rather than the mass of a stellar system. In Section 2
we show that above a critical velocity dispersion $\sigma_{\rm
crit}\sim 40~{\rm km~s}^{-1}$, the total binding energy in
primordial binaries that can be tapped in three- and four-body
interactions is significantly less than the total binding energy of
the system as a whole, and hence if such systems are dynamically
relaxed they will undergo deep core collapse essentially unhindered
by dynamical heating from binaries (thus leading to one of the
scenarios discussed by \citealt{1978MNRAS.185..847B} in the context
of more massive clusters). We note that the galaxies seen thus far
without massive black holes have velocity dispersions below this
limit (e.g., NGC 205 has $\sigma=39$~km~s$^{-1}$ and M33 has
$\sigma=24$~km~s$^{-1}$; see \citealt{2009ApJ...698..198G} and
references therein). In Section 3 we discuss the evolution of
binary-free systems. Previous studies have demonstrated that the
black holes in such systems sink rapidly to the center and interact
mostly with each other in a dense subcluster. This leads to three
paths, all of which culminate in the formation of a massive black
hole: (1)~For sufficiently high escape speed systems dynamical
interactions result in runaway merging of the black holes into a
massive hole. For lower escape speed systems either one or zero
black holes remain after ejection of merged pairs due to asymmetric
emission of gravitational radiation during coalescence or Newtonian
recoil from interactions of black holes with dynamically formed
binaries. (2)~If one black hole remains then it tidally disrupts
ordinary stars and consumes the remnant disks quickly, hence grows
rapidly into a massive black hole; other growth mechanisms, such as
the accretion of nascent gas or winds, are insignificant. (3)~If no
black holes remain then runaway collisions form a massive star that
evolves into a black hole, and this first black hole grows via
accumulation of tidally disrupted stars. Thus once binary support
is removed, massive black hole formation is assured as long as holes
consume tidal remnants quickly. In Section 4 we determine the
minimum mass of a black hole formed via these paths and discuss the
implications of this scenario.
\section{Velocity dispersion threshold for deep core collapse}
Stellar systems that are in virial equilibrium evolve
via two-body interactions over their relaxation time, which
for a star of mass $m$ in a system of velocity dispersion
$\sigma$ at a location with an average stellar mass density
$\rho$ is
\begin{equation}
t_{\rm rlx}\approx {0.3\over{\ln\Lambda}}{\sigma^3\over{
G^2\rho m}}
\end{equation}
\citep{1987degc.book.....S},
where $\ln\Lambda\sim 5-10$ is the Coulomb logarithm. The
evolution of an isolated stellar system is towards a greater
concentration of stars in the center balanced by a greater
expansion of the cluster on the outskirts; there is a productive
analogy with thermodynamics, in which this behavior can
be seen as the gradual increase of cluster entropy (the greater
phase space accessed by the outer stars more than makes up for
the diminished phase space accessed by the stars in the core).
It was demonstrated several decades ago that if all the stars
are single (as opposed to being in binary or multiple systems),
then over a timescale that scales with the relaxation time at
the half-mass radius for a typical star (where the multiple is
$\sim 15$ for an initially Plummer sphere of equal-mass stars
but is $\sim 0.2$ if there is a broad initial mass function; see
\citealt{2002ApJ...576..899P}), the core becomes so dense that
it loses thermal contact with the rest of the cluster and the
core undergoes a collapse such that the number density in the
inner portions scales as $n\sim r^{-2.2}$
\citep{1980MNRAS.191..483L,1980ApJ...242..765C}. If we take
present-day nuclear star clusters as an example, then from
Figure~1 of \citet{2009ApJ...694..959M} we find that most
have half-mass relaxation times less than ${\rm few}\times 10^{10}$~yr
and thus are candidates to collapse within a Hubble time if
they had broad initial mass functions and no central massive
object to supply energy.
Binaries are the key to sustaining a cluster against this collapse.
When number densities become high enough that binary-single
interactions are common, such interactions can harden the binary
and hence inject energy into the cluster that decreases its density.
Many calculations (see, e.g., \citealt{1961AnAp...24..369H,
1975MNRAS.173..729H} for pioneering work) have shown
that binaries that are initially hard (meaning that their binding
energy exceeds the kinetic energy of a typical single star) tend
to harden via binary-single interactions, whereas initially soft
binaries tend to soften and eventually break up. Consistent with
this expectation, globular clusters have a significantly smaller
binary fraction than is seen in the field
(e.g., \citealt{1997ApJ...474..701R,2012A&A...540A..16M}).
In principle, even a very small number of binaries could have
enough binding energy to hold off the collapse of a cluster.
Consider for example a reasonably rich globular cluster with a
velocity dispersion of 10~km~s$^{-1}$. A binary of two solar-mass
stars near contact, with an orbital radius of 0.01~AU, has
$\sim 10^3$ times the binding energy per mass that a single cluster star
has in kinetic energy, so if 0.1\% of stars are in such binaries
the energy to hold off cluster collapse appears to be present.
White dwarfs are 100 times smaller yet, so it might seem that if there
is one near-contact white dwarf binary in a cluster of $10^5$ stars
its binary interactions could successfully oppose core collapse.
This is of course not true, for two reasons. First, as the
semimajor axis of a binary shrinks, its close interactions with
single stars have a greater and greater chance of destroying the
single star or one of the binary stars, hence the kinetic energy
of recoil is not shared with the cluster \citep{1994ApJ...424..870D}.
As an example, a tight
white dwarf binary cannot eject a main sequence star in this way.
Second, even if a three-body interaction is clean, a star
that is thrown completely from the cluster cannot
share its kinetic energy with the cluster and the only expansion
of the core comes from the comparatively minor effect that the
core now has lost one star's worth of mass.
The available binding energy from binaries is thus limited; clusters
with higher velocity dispersions have a more limited
available binding energy. As we now argue, this means that above
a velocity dispersion $\sigma_{\rm crit}\sim 40$~km~s$^{-1}$,
the binaries cannot hold off core collapse.
It should be noted that the velocity dispersion of a cluster will evolve
as a function of time, with velocity dispersions being somewhat larger
in the past when the cluster was more massive
(e.g., \citealt{2009MNRAS.395.1173G,2010MNRAS.407.2241K}).
The effect could be particularly enhanced for clusters containing
multiple stellar populations where a large fraction of the first generation
of stars are lost \citep{2008MNRAS.391..825D}. However the velocity
dispersion at later times will be more relevant to the discussion in this paper,
as this is when core collapse may typically be possible (i.e., on timescales
longer than the half-mass relaxation time).
As a first estimate of the available binding energy for a binary
with initial semimajor axis $a_0$, we assume that the eccentricity
distribution of binaries with a given semimajor axis is a thermal
distribution $P(e<e_0)=e_0^2$ truncated at the maximum
eccentricity $e_{\rm max}$ allowed for pericenter distances
greater than some minimum $r_{\rm p,min}$ (this could be the
pericenter distance at which stars collide), $a(1-e_{\rm
max})=r_{\rm p,min}$ for a semimajor axis $a$. Thus a fraction
$e_{\rm max}^2$ of orbits are allowed, hence the binding energy
that can be released from semimajor axis $a+da$ to $a$ is weighted
by $e_{\rm max}^2(a)=1-2r_{\rm
p,min}/a+(r_{\rm p,min}/a)^2$. Thus the total available binding
energy from an initial semimajor axis $a_0$ with stars of mass $m$ is
\begin{equation}
E_{\rm bind,tot}(a_0)=\int_{r_{\rm p,min}}^{a_0}
{Gm^2\over {2a^2}}e^2_{\rm max}(a)da\; .
\end{equation}
This gives
\begin{equation}
\begin{array}{rl}
E_{\rm bind,tot}(a_0)&=\frac{Gm^2}{2r_{\rm p,min}}
\biggl[\frac{1}{3}-\frac{r_{\rm p,min}}{a_0}+\left(\frac{r_{\rm p,min}}{a_0}\right)^2\\
&-\frac{1}{3}\left(\frac{r_{\rm p,min}}{a_0}\right)^3 \biggr] \; .
\end{array}
\label{eq:bind}
\end{equation}
For $a_0>10r_{\rm p,min}$, $E_{\rm bind,tot}$ is roughly constant at
$Gm^2/6r_{\rm p,min}$ whereas it decreases rapidly below
$10r_{\rm p,min}$, so for simplicity we will approximate $E_{\rm bind,tot}$
as zero below $10r_{\rm p,min}$ and $Gm^2/6r_{\rm p,min}$ above it.
Our next step is to note that for stars formed in a low-density
environment, there is roughly one binary per single star, and the
binary semimajor axes are approximately equally distributed in $\ln
a$ from 0.01~AU to $\sim 10^4$~AU \citep{1982Ap&SS..88...55P}.
In an environment where binaries
beyond a certain semimajor axis are ionized by binary-single
encounters, the fraction of binaries will be decreased. For
example, if we begin with six single stars and six binaries and
ionize the ones larger than 1~AU, we now have fourteen single
stars and two binaries. If as above we now only concentrate on
the binaries larger than $0.1~{\rm AU}=10r_{\rm p,min}$, this
represents $f\sim 7\%$ of the stars in the system. Thus the binary
binding energy per {\it all} stars in the system is
\begin{equation}
e_{\rm bin}/{\rm star}=fGm^2/(6r_{\rm p,min})\; .
\end{equation}
This is to be compared with the binding energy per star in the
cluster, which by the virial theorem equals the kinetic energy per
star in the cluster, or
\begin{equation}
e_{\rm cluster}/{\rm star}={1\over 2}m\sigma^2
\end{equation}
for a velocity dispersion $\sigma$. The point at which
$e_{\rm bin}/{\rm star}<e_{\rm cluster}/{\rm star}$ is the point at
which core collapse is theoretically possible. From the numbers
above, if the single stars and the binary components both have
masses $\approx 1 M_\odot$ this happens when $\sigma\sim 40$~km~s$^{-1}$,
meaning that interactions with binaries of semimajor axis
$\gtorder 0.5$~AU have positive total energy and are thus soft, so
core collapse can proceed.
If the initial distribution of binary binding energies is extremely
unusual, e.g., if most stars are formed in binaries with semimajor
axes less than 0.5~AU, then the supply of binary energy would be
greater and the threshold velocity dispersion could in principle
be raised. Barring such an unexpected distribution, however, the
threshold should be robust.
Indeed, work by \citet{1996IAUS..174..263C} suggests that there may
be less binary energy available than we derive above.
They take into account that,
rather than simply resetting the eccentricity of a binary, a
binary-single encounter can be resonant and hence for a given
interaction there is a greater chance to get to a very small
separation. From their Figure~4 we infer that for solar-type stars and
$\sigma=40$~km~s$^{-1}$ a typical energy
$\Delta E\approx 6\times 10^{46}$~erg can be extracted from an
initially hard binary, whereas equation (\ref{eq:bind}) gives roughly an
order of magnitude larger energy. Thus at $\sigma=40$~km~s$^{-1}$,
and perhaps at a slightly lower velocity dispersion, the energy that can
be extracted from primordial binaries is significantly less than
the binding energy of the cluster, hence such clusters can undergo
core collapse without being impeded significantly (three-body binary
formation and two-body tidal capture are also insignificant; see
\citealt{1983ApJ...268..319H} and \citealt{1975MNRAS.172P..15F}, respectively).
\section{Paths towards massive black hole formation}
We now evaluate the paths towards massive black hole formation that we
mentioned in the introduction: runaway merging of black holes, tidal
disruption of stars by a single remaining black hole, and formation of
a new black hole from runaway collisions of stars, followed by tidal
disruption of stars by that black hole. The first important question
is one of time scales. In all three paths, the overwhelmingly longest
phase is the initial progression to core collapse. To see this, note
that the time to core collapse is a multiple less than unity ($\sim
0.2$ for systems with a broad mass distribution; see
\citealt{2002ApJ...576..899P}) of the relaxation time of the nuclear
star cluster, which from Figure~1(a) of \citet{2009ApJ...694..959M} is
$t_{\rm rlx}\sim 10^9~{\rm yr} (M/10^6~M_\odot)$ with a spread of a
factor $\sim 10$. There is some observational evidence that nuclear
star clusters obey a similar $M-\sigma$ relation to that seen for
higher-mass black holes; e.g., Figure~2
of \citet{2006ApJ...644L..21F} indicates that nuclear star
clusters might have the same $M-\sigma$ slope as has been found for
black holes (see \citealt{2009ApJ...698..198G} for a recent discussion
of this relation) but offset so that the mass is a factor of $\sim 10$
higher than the black hole mass would be.
velocity dispersion of the cluster with that of the surrounding
bulge, this gives a cluster mass of
\begin{equation}
M_{\rm cl}\approx 10^6~M_\odot(\sigma/40~{\rm km~s}^{-1})^4
\end{equation}
based on the scalings of \citet{2009ApJ...698..198G}. Thus
clusters with $\sigma\ltorder 100~{\rm km~s}^{-1}$ have a chance
to undergo core collapse within a Hubble time. Figure~\ref{fig:flowchart}
illustrates the paths we consider.
\begin{figure}[htb]
\begin{center}
\hspace*{-1.3cm}
\includegraphics[scale=0.45]{f1.eps}
\caption{Paths towards massive black hole formation in a stellar
cluster. At velocity dispersions less than $\sim 40~{\rm km~s}^{-1}$,
heating from binaries prevents full core collapse. At velocity
dispersions greater than $\sim 100~{\rm km~s}^{-1}$, a typical
nuclear star cluster will have too long a relaxation time for
its core to collapse in a Hubble time, although a massive black
hole can form in other ways. At intermediate velocity
dispersions, full core collapse will occur and will likely result
in either zero or one remaining stellar-mass black hole. In the
latter case, the hole will grow via tidal disruption of stars;
in the former, the stars will undergo runaway collisions that
produce a black hole, which will then grow via tidal disruption
of stars.}
\label{fig:flowchart}
\end{center}
\end{figure}
This step is necessary for all three paths we discuss. For a
collapsed core, the self-similarity arguments of
\citet{1980MNRAS.191..483L}, and the classic simulations of
\citet{1980ApJ...242..765C}, show that if all the stars are treated
as point masses and no three-body binary formation is allowed, then
the density of a single-mass system evolves towards a $n\propto
r^{-2.2}$ configuration. This is quite close to a singular
isothermal sphere $n\propto r^{-2}$, hence we will assume that the
velocity dispersion is nearly constant in the collapsed region. As
a result, the relaxation time scales roughly as $\rho^{-1}$, so the
evolution timescale is shorter by orders of magnitude in the core of
the cluster than it is in the cluster as a whole.
As a result, once the core collapses all three paths are traveled in
a time much shorter than the time to collapse. For example, runaway
mergers between black holes (or, in the third path, runaway
collisions between stars) occur roughly on the core relaxation
timescale, because when the number density is not yet sufficient for
frequent mergers or collisions, further relaxation will increase the
density on the relaxation time until interactions are frequent. Thus
the only limiting factor is the initial collapse time. We also note
that unlike in the scenario of runaway collapse of young massive
clusters proposed by \citet{2002ApJ...576..899P}, the time window
for runaway collisions of stars to form a single black hole (in the
third path) is not millions of years, but billions of years. The
reason is that when the cluster is young enough that initially all
stars are on the main sequence, supernovae from the most massive
stars begin at $\sim$2.5~Myr and proceed for many stars, causing the
core to lose a large amount of mass to the ejecta and therefore
expand and lower the number density. In contrast, in our picture
the evolution to core collapse is much later, perhaps billions of
years, hence the remaining stars are low-mass and thus only the
collision product will be massive enough to explode; very little
mass is lost, so the density remains high.
In addition to the general core collapse, in a multimass
system there is considerable mass segregation. This means
that the stars in the core will tend to be towards the
massive end, perhaps $\sim 1~M_\odot$ after billions of years.
In addition, of the objects
likely to be present after a long time, stellar-mass black
holes will be by a factor of a few to several the most massive.
Many studies (e.g., \citealt{2008MNRAS.386...65M}) have
concluded that the black holes then form
a dense subcluster in which the holes interact mainly with themselves.
If, as in our scenario, there are no binaries, then the holes
can reach extremely high density in the center of the subcluster
and capture each other via emission of gravitational radiation
in initially hyperbolic two-body encounters.
From \citet{1989ApJ...343..725Q}, the critical pericenter for
a two-body gravitational wave capture between two black
holes with a total mass $M=m_1+m_2$ and a reduced
mass $\mu=m_1m_2/M$ is
\begin{equation}
\begin{array}{rl}
r_{\rm p,GW}&=8.5\times 10^8~{\rm cm}~(M/20~M_\odot)^{5/7}\\
&\times(\mu/5~M_\odot)^{2/7}(\sigma/40~{\rm km~s}^{-1})^{-4/7}\; ,
\end{array}
\end{equation}
and hence their gravitationally focused cross section is
\begin{equation}
\begin{array}{rl}
\Sigma_{\rm bh}&=2\pi r_pGM/\sigma^2\approx
9\times 10^{23}~{\rm cm}^2(M/20~M_\odot)^{12/7}\\
&\times(\mu/5~M_\odot)^{2/7}
(\sigma/40~{\rm km~s}^{-1})^{-18/7}\; .
\end{array}
\end{equation}
When two black
holes capture each other in this way, their inspiral is extremely
rapid: from \citet{1964PhRv..136.1224P}, the inspiral time is
\begin{equation}
\begin{array}{rl}
a/(da/dt)&={5\over{64}}{c^5a^4(1-e^2)^{7/2}\over{
G^3\mu M^2(1+73e^2/24+37e^4/96)}}\\
&\approx 10^5~{\rm yr}
(a/R_\odot)^4(1-e^2)^{7/2}
\end{array}
\end{equation}
where the approximation is for $e\approx 1$ and our given number
assumes $m_1=m_2=10~M_\odot$. For $a\approx 1
R_\odot=7\times 10^{10}$~cm and $e>0.99$ (so that $r_p<r_{\rm p,GW}$),
the inspiral time is therefore less than 0.1~years, and for a
fixed $r_p$ the inspiral time scales as $a^{1/2}$ so that even
for an initial $a=100$~AU the inspiral time is just a few years.
When the holes do merge they emit gravitational radiation that is
in general asymmetric, meaning that the remnant single black hole
will recoil relative to its original center of mass. Studies of
black hole recoil \citep{2007ApJ...668.1140B,2008ApJ...682L..29B,
2008PhRvD..77d4028L,2009PhRvD..79f4018L,2010CQGra..27k4006L,
2010ApJ...719.1427V,2012arXiv1201.1923L} show that although kicks from
the coalescence of nonspinning black holes are limited to $<200~{\rm
km~s}^{-1}$, rapidly spinning black holes can produce remnants
that travel at thousands of kilometers per second relative to
their original center of mass. Thus in this environment, unlike in
the conditions that may exist in the $z>10$ universe
\citep{2011ApJ...740L..42D},
mergers that are restricted to comparable-mass black holes are
most likely to lead to an ejection of the remnant.
As pointed out
to us by S. Sigurdsson (private communication), for low velocity
dispersions the ejection of black holes is likely to be dominated
by encounters with hard binaries formed by the interaction of three
initially hyperbolic black holes. \citet{1985IAUS..113..231H}
finds that the rate of formation of ``immortal'' binaries by
this process (i.e., binaries that are not later softened and
ionized) is ${\dot n}_{3B}=126G^5m^5n^3/\sigma^9$, where he
assumes objects of identical mass $m$ and $\sigma$ is the three-dimensional
velocity dispersion. Thus the ratio of the formation rate per
volume of these binaries to the rate of gravitational wave capture
of black holes by each other, assuming equal masses, is
\begin{equation}
{{\dot n}_{3B}\over{{\dot n}_{\rm BHBH}}}\approx
200 (n/10^{10}~{\rm pc}^{-3})(m/10~M_\odot)^3
(\sigma/40~{\rm km~s}^{-1})^{-52/9}\; .
\end{equation}
Thus for low to moderate velocity dispersions, and high number
densities, binary-single ejections are likely to dominate. The
result will be similar to the case in which only double black
hole mergers occur: there will either be zero or one hole left.
For the rest of this section we concentrate on ejections by mergers.
We set up a simple simulation of the evolution of a black hole
subcluster with no binaries. We assume that initially there are
either 100 or 101 black holes; note that even if all mergers eject
the remnant, having an odd number initially guarantees that one
will survive because with no binaries the interactions are
pairwise. The distribution of black hole masses is not
well-established, and the distribution of their spins is even less
so, but as an illustrative example we draw the initial masses of
the black holes from the range $[5,30)~M_\odot$, with a
distribution $dN/dM\propto M^{-2}$, and the initial spins are drawn
uniformly from the range $cJ/(GM^2)=[0,1)$.
We simulate the evolution of the cluster interaction by
interaction using the rejection method: we select two black
holes randomly, compute the cross section $\Sigma$ of the interaction,
divide by the largest possible cross section $\Sigma_{\rm max}$
(which is the cross section of capture by the two most
massive black holes in the sample), and then compare that
ratio with a uniform random deviate $x\in [0,1)$. If
$x<\Sigma/\Sigma_{\rm max}$ we accept the interaction,
otherwise we draw again.
If the interaction is between two black holes, then we use
the recent \citet{2012arXiv1201.1923L} formula for the kick. If
the kick is greater than the escape speed $v_{\rm esc}=4\sigma$
(typical of a core-collapsed cluster) we assume that the remnant
has been ejected from the cluster and thus we remove both
black holes from the sample. Otherwise, we assume
the remnant remains, hence we sum the masses of the holes
and estimate the spin of the remnant following the prescription
given in \citet{2008PhRvD..78d4002R}.
Figure~\ref{fig:bh} shows the results. Here we
plot the fraction of clusters that retain a black hole
after subcluster evolution, and the median mass of the
final black hole if one remains, as a function of the velocity
dispersion $\sigma$ of the cluster. For each velocity dispersion
we performed $10^4$ simulations. For low escape speeds, almost
all mergers between black holes eject the remnant, hence
retention depends on whether the initial number of holes is
even or odd. As the escape speed increases, so does the
probability that a merger will not eject the remnant; for
$v_{\rm esc}\ltorder 100$~km~s$^{-1}$ it is most probable
that this happens when the spins of the holes are low and
their masses are close to each other (note from symmetry
that there is zero recoil from the merger of equal-mass
nonspinning holes). As the escape speed increases further,
mergers between black holes of different masses can be
retained, until at $v_{\rm esc}\gtorder 800$~km~s$^{-1}$
a runaway occurs and a single victorious black hole is
usually the result.
\begin{figure}[htb]
\begin{center}
\plotone{f2.eps}
\caption{Fraction of clusters of a given velocity dispersion
that retain a black hole after a succession of mergers (upper
left curves) and, if a black hole is left, the median mass of
the remaining black hole (lower right curves). For this figure
we ignore the effects of hard binaries formed by the
interactions of three initially hyperbolic black holes (see
text), hence there is a difference between cases with an
initially even and an initially odd number of black holes. The
solid curves are for 100 initial black holes, and the dotted
curves are for 101 initial black holes; the asymmetry in
retained fraction at low velocity dispersions is because if
every black hole merger results in an ejection, an initially
even number will leave behind no black holes whereas an
initially odd number will leave behind one. We assume an escape
speed that is four times the velocity dispersion. This figure
demonstrates that retained runaway mergers leading to massive
seeds are only likely for velocity dispersions $\gtorder
200$~km~s$^{-1}$.}
\label{fig:bh}
\end{center}
\end{figure}
From these simulations we can argue that for clusters with
velocity dispersions $\ltorder 100$~km~s$^{-1}$ a runaway
is unlikely, but that there is roughly an equal chance of
leaving behind either one or zero holes (depending largely
on the parity of the initial number until $\sigma\gtorder
60$~km~s$^{-1}$). When there is a black hole left behind
it is likely for $\sigma\ltorder 100$~km~s$^{-1}$ to be
at the low end of the mass distribution ($\sim 5~M_\odot$),
because such black holes are initially more common. In addition,
lower-mass black holes have a lower cross section for capture
and hence an enhanced probability of survival.
The subsequent evolution has two possibilities:
{\it One black hole remains.}---Then as we discuss in the next
section, the black hole will sit near the center of the high
number density distribution of stars. Tidal disruptions will
add a few tens of percent of the stellar mass to the hole, mostly
within a few weeks or less of the initial disruption, hence the
hole will grow quickly. Given that interactions with the stars
cannot eject the hole from the cluster, it will become a massive
black hole in a short timescale.
{\it No black holes remain.}---In this case, the stars will
undergo runaway collisions with themselves, leading to the
production of a massive star that will then become a black hole
(e.g., \citealt{2002ApJ...576..899P}). The situation then
reduces to the previous case, because the time needed to produce
a {\it second} black hole, which could potentially eject the
first, is significantly larger than the time needed for the
first hole to increase its mass to the point that it can no
longer be ejected.
We now discuss these possibilities in greater depth.
\subsection{Interactions between stars and a black hole}
Although the central density after core collapse is formally
infinite, the finite number of stars means that this translates
to a few stars in a small region near the core. For example,
if we consider the inner $\sim 10$ solar-type stars after core
collapse and continue to assume a constant velocity dispersion,
then they are in a region $r=GM/\sigma^2\sim 5~{\rm AU}
(M/10~M_\odot)(\sigma/40~{\rm km~s}^{-1})^{-2}$ in radius,
with a resulting number density of $n>10^{14}~{\rm pc}^{-3}$.
Even the inner 1000 stars are in a region with $n>10^{10}~{\rm pc}^{-3}$,
so interactions will be common and rapid.
{\it Stellar tidal disruption by black holes.}---A
promising mechanism for such runaway growth is tidal disruption
of stars by stellar-mass black holes.
The critical pericenter for tidal disruption of a star of
mass $m$ and radius $R$ by a black hole of mass $M$ is
\begin{equation}
r_{\rm p,tidal}=(3M/m)^{1/3}R\; .
\end{equation}
Thus the gravitationally focused cross section for tidal disruption,
assuming that the black hole mass greatly exceeds the stellar mass,
is
\begin{equation}
\Sigma_{\rm tidal}\approx 10^{26}~{\rm cm}^2(M/10~M_\odot)^{4/3}
(\sigma/40~{\rm km~s}^{-1})^{-2}
\end{equation}
for solar-type stars. This is roughly an order of magnitude
greater than the star-star collision cross section discussed
later, and two orders
of magnitude larger than the black hole -- black hole capture
cross section. Moreover, the rate is nonlinear in the mass of the
black hole ($\Sigma\propto M^{4/3}$). Thus the conditions
for a runaway exist.
If tidal disruption does occur, then the mass will be force fed
to the black hole at an extremely super-Eddington rate.
Studies suggest that fallback initially occurs over several
times the internal dynamical time of the disrupted star
\citep{1989ApJ...346L..13E},
which is several hours for a solar-type star. The accretion
rate is therefore many millions of times
the Eddington rate of a stellar-mass black hole. Analyses of such
supercritical accretion (e.g., \citealt{1976ApJ...206..295M,
1979MNRAS.187..237B,1980AcA....30....1J,1999ApJ...518..356P,
2005ApJ...628..368O}) indicate that the matter will indeed
flow into the hole at that rate, but that most of the photon luminosity
that is generated will be advected in with the very optically thick
matter (hence although the accretion rate is tremendously
super-Eddington, the luminosity could be limited to Eddington or
slightly higher). Thus it is expected that within a matter of days, i.e.,
much shorter than any other relevant timescale, most of the bound
remainder of the star will flow onto the hole. If this is the case,
then the majority of the accretion will finish without harassment
from additional encounters by stars. If, on the contrary, the
accretion rate is actually limited to the Eddington rate then the
time needed to accrete most of the matter is much longer than the
time to the next encounter, and the disk might be disrupted, leading
to negligible growth of the hole.
The unbound remnant of the star will be thrown outwards at speeds
comparable to the orbital speed at tidal disruption, which is
$\sim 800~{\rm km/s}(M/10~M_\odot)^{1/3}$ for a solar-type star.
This is much larger than the escape speed, so the wind will depart
ballistically unless it runs into many times its own mass in gas
in the cluster. However, given that the virial temperature of the
cluster is $\sim 10^5~{\rm K}(\sigma/40~{\rm km~s}^{-1})^2$ and that
cooling is extremely efficient at that temperature, the total
amount of gas in the cluster at a given time will be small even
though its escape speed is sufficient to retain winds from red
giants or (earlier, when more massive stars existed) planetary
nebulae. Thus we assume that the unbound gas simply escapes from
the cluster. The ratio of unbound gas to gas that accretes onto
the black hole is rather uncertain. The initial disruption leaves
about half the mass bound \citep{1989ApJ...346L..13E}, but shocks upon
the return of the bound matter might unbind additional mass. A
recent study by \citet{2009MNRAS.400.2070S} considers different
ejection fractions
ranging from $f_{\rm esc}=0.5$ (corresponding to negligible return shocks)
to $f_{\rm esc}=0.8$ (corresponding to powerful return shocks).
In our scenario, the upshot is that because a single black hole
will grow, its growth will eject up to a few times its own mass
in stellar debris. Until this reaches at least hundreds, and probably
thousands, of solar masses this will be such a small fraction of
even the core mass that we expect it to have a minor effect on the
dynamical evolution.
{\it Star-star collisions.}---At the velocity dispersions we consider,
these collisions are likely to lead to mergers with little mass loss,
because $\sigma\sim 40~{\rm km~s}^{-1}$ is much less than the escape
speed $\sim 600~{\rm km~s}^{-1}$ of a solar-type star. For
the same reason, these collisions are gravitationally focused,
with a cross section $\Sigma=\pi r_p^2(1+2GM_{\rm tot}/(r_p\sigma^2))\approx
2\pi(GM_{\rm tot}/\sigma^2)r_p$ for a pericenter distance $r_p$ and
a total mass between the stars of $M_{\rm tot}$. The
relevant pericenter distance is the sum of the stellar radii,
which is $2R_\odot\approx 0.01$~AU for two solar-type stars, hence
for two such stars
$\Sigma\approx 1.5\times 10^{25}~{\rm cm}^2(\sigma/40~{\rm km~s}^{-1})^{-2}$.
The characteristic time of interaction is then
$\tau=1/(n\Sigma\sigma)\approx 10^6~{\rm yr}(n/10^{10}~{\rm pc}^{-3})^{-1}
(M_{\rm tot}/2~M_\odot)^{-1}(\sigma/40~{\rm km~s}^{-1})^{-1}$.
Note that as a result even for the inner $\sim 10^3$ stars the
collision time for solar-type stars is much less than their
$\sim 3\times 10^7$~yr Kelvin-Helmholtz time, hence the stars
will not be able to radiate their collisional energy before the
next collision. However, because the velocity dispersion is
$<0.1$ times the stellar escape speed, the energy added is minor
and most of the pressure holding up the collision product stems
from gravitational contraction rather than either collision
energy or nuclear energy; these are thus not stars in the standard
sense, and need not have luminosities as high as those of main
sequence stars of the same mass.
In addition, on the main sequence, stellar radii increase with
increasing mass, hence the rate of interactions increases more than
linearly with increasing stellar mass. An additional factor
is that more massive stars tend to sit closer to the center of
the potential, where the number density of objects is greater.
The conditions are thus ripe for a runaway, and indeed runaway
merging of stars has been proposed as a mechanism for the
generation of supermassive stars that later evolve into
intermediate-mass black holes \citep{2002ApJ...576..899P,
2006MNRAS.368..141F}. It has been
suggested that the high wind rates expected for high-mass stars
can severely limit the growth of supermassive stars
\citep{2009A&A...497..255G}. Note,
however, that these wind rates are based on extrapolations of
winds for main sequence stars, and as indicated above the
collision products will be substantially larger and less
luminous than main sequence stars. Indeed, the collision products
are more likely to be a ``bag of cores'' than an actual star, where an
extended gaseous envelope engulfs an ensemble of stellar cores.
We do note that although \citet{2009A&A...497..255G} argue that
winds may prevent the formation of intermediate-mass black holes,
they find that runaway collisions produce stars massive enough
to evolve to normal stellar-mass black holes, at least. Thus
for our purposes we assume that star-star collisions will lead
to black hole production.
The question is then whether the first black hole that forms
has enough time to consume many stars so that by the time the
next black hole forms, the first one is so massive that any
BH-BH merger will produce a weak recoil that retains the remnant
in the cluster. We argue that this is in fact the case: the
first black hole to form will be at the center of the mass
distribution, where the number density is the highest. If this
is, for example, in the region occupied by the inner $\sim 100$
stars, then the number density is such that tidal disruptions of
stars by even a $10~M_\odot$ black hole occur on average once
per few hundred years, and the interaction time scales as $M^{-4/3}$.
Thus the hole will double its mass every few thousand years, i.e.,
in a time vastly shorter than the lifetime of even the most
massive stars. We note that although the segregation of the
black holes to the center of the cluster and their ejection
leads to some flattening of the stellar number density near the
center of the cluster (see, e.g., \citealt{2012MNRAS.tmp.2546A}
and in particular his Figure~8 for a recent N-body simulation),
we expect that when most of the black holes have been ejected
the stars near the core, which have a very short relaxation time,
will regrow the cusp.
Hence the first black hole formed due to runaway
stellar collisions will be able to increase its mass by a large
factor before any other new-generation black hole forms.
\subsection{Minimum mass of central BH}
We consider here the evolution of a cluster with a velocity
dispersion large enough to guarantee core collapse. If a
black hole grows in the cluster, what is a rough approximation
to its minimum mass?
We will approach this question in two different ways. First, we will
determine the mass of a black hole nailed to the center of an
$n\propto r^{-2}$ core collapse cluster such that dynamical
processes around the black hole can supply enough heat to
help forestall further core collapse. Second, we will apply
the criterion that the wander radius of the black hole must
be less than its radius of influence, under the assumption
that otherwise the number of stars bound to the black hole
would be much less, hence its heating influence would be
reduced.
We will assume as before that the mass of the nuclear star cluster
is related to its velocity dispersion by
$M_{\rm cl}\approx 10^6~M_\odot(\sigma/40~{\rm km~s}^{-1})^4$. For a core
collapse cluster with $n\propto r^{-2}$, the velocity dispersion
is the same at all radii and the gravitational binding energy is
$E_{\rm bind}=(1/2)M_{\rm cl}\sigma^2$ from the virial theorem.
This energy must be compared with the available energy (as defined
before) from dynamics around the central black hole. The available
energy per unit mass around the black hole that we found previously is
$GM_{\rm BH}/(6r_{\rm p,min})$, where $r_{\rm p,min}$ is the minimum
pericenter distance of an orbit that can last long enough for
significant dynamical interactions. For a black hole, the relevant
time is the time for gravitational radiation to cause the object to
spiral in; this time scales as $T\sim (mM_{\rm BH}^2)^{-1}r_p^4$, roughly, so
for a fixed $T$ we have $r_{\rm p,min}\propto M_{\rm BH}^{1/2}$. We
used $r_{\rm p,min}\approx 0.01$~AU for $10~M_\odot$ (giving an
inspiral time of a few million years), so we will adopt
$r_{\rm p,min}=0.1~{\rm AU}(M_{\rm BH}/10^3~M_\odot)^{1/2}$.
If the distribution of stars around the black hole is a steep cusp, then
the stellar mass in the radius of influence of the black hole equals
the mass of the black hole (this need not be true if the density
distribution has a core profile; see equation (14) of
\citealt{2011ApJ...735...89L}). When we compare the available dynamical
energy of the stars around the black hole with the binding energy
of the cluster, we find that
\begin{equation}
M_{\rm BH}\gtorder 500~M_\odot(\sigma/40~{\rm km~s}^{-1})^4
\end{equation}
is required for the stars around the black hole to provide sufficient
energy to hold off collapse.
We can also approach this from a different angle. A finite-mass black
hole will not be nailed to the center of the cluster. Instead, it
will wander due to stochastic dynamical interactions. If the wander
radius is less than its radius of influence then we can suppose that
it is near the center of the stellar distribution where encounters
are frequent, but if the wander radius is larger then this need
not be the case and heating could be less efficient. Thus a different
criterion is $r_{\rm wander}<r_{\rm infl,BH}$. Suppose that there
is a nearly constant-density core in the inner 10\% of the
cluster; then the scale height of
a species in a cluster is inversely proportional to the square root
of its mass (from energy equipartition arguments), hence for this system
we expect
\begin{equation}
r_{\rm wander}\sim r_{\rm cl}(\langle m\rangle/M_{\rm BH})^{1/2}\; .
\end{equation}
Here $\langle m\rangle$ is the average mass of a star. The cluster
radius is $r_{\rm cl}=GM_{\rm cl}/\sigma^2$ and the radius of influence
of the black hole is $GM_{\rm BH}/\sigma^2$, hence the wander
criterion is
\begin{equation}
\begin{array}{rl}
0.1(GM_{\rm cl}/\sigma^2)(\langle m\rangle/M_{\rm BH})^{1/2}&\ltorder
GM_{\rm BH}/\sigma^2\\
M_{\rm BH}&\gtorder(0.01~M_{\rm cl}^2\langle m\rangle)^{1/3}\\
M_{\rm BH}&\gtorder 2\times 10^3~M_\odot (\sigma/40~{\rm km~s}^{-1})^{8/3}\; ,\\
\end{array}
\end{equation}
where in the last line we assume $\langle m\rangle=1~M_\odot$.
Recall that these are {\it lower limits} on the mass of the central
black hole. The mass could be considerably greater depending on
long-term accretion of stars or gas.
\section{Discussion and conclusions}
We have discussed the evolution of a relaxed cluster that has
a velocity dispersion $\sigma\gtorder 40$~km~s$^{-1}$, which
is large enough to render binaries insignificant,
but that does not initially contain a massive central black hole.
We argue that a massive hole will inevitably form if it can
swallow tidal debris rapidly: interactions
in the black hole subcluster will leave either zero or one
hole. In the case of zero, a black hole will form from the
product of runaway stellar merging. In either case, the
hole will feed quickly from the remnants of the stars it tidally
disrupts, and hence will grow until it has significant dynamical
effects on the cluster and thus slows its own growth. It is
not guaranteed that the holes will then follow the same
$M-\sigma$ relation that exists for higher velocity dispersion
systems. It is also not guaranteed that clusters with lower
velocity dispersions will {\it not} have black holes, but it
is possible that massive black-hole formation
is prevented as long as binaries have a significant heating effect
(see \citealt{2008ApJ...686..303G} for a numerical exploration
of the heating due to binaries or a massive central object).
\acknowledgements
We thank Sverre Aarseth, Tal Alexander, Jillian Bellovary,
Kayhan G\"ultekin, David Merritt, and Steinn Sigurdsson for
valuable discussions. We also thank the referee for a
constructive report.
This work was supported by NASA ATP grants NNX08AH29G and
NNX12AG29G (MCM) and Swedish Research Council grants
2008--4089 and 2011--3991 (MBD).
|
{
"timestamp": "2012-06-28T02:01:34",
"yymm": "1206",
"arxiv_id": "1206.6167",
"language": "en",
"url": "https://arxiv.org/abs/1206.6167"
}
|
\section{Introduction}
Over the last decade, there has been increasing interest in how network heterogeneity may affect nonequilibrium dynamics in qualitative ways \cite{barratbook}. One of the simplest and most important examples has been the susceptible to infected (SI) and susceptible to infected to removed (SIR) epidemic models, famous from epidemiology \cite{murray}, which model disease outbreaks in populations. A decade ago, \cite{pastor} first demonstrated that heterogeneous networks can fundamentally alter the dynamics of these processes in qualitative ways -- in particular, the epidemic threshold vanishes on scale free graphs with degree exponent $\gamma \le 3$, so epidemics always infect a nontrivial fraction of nodes on an infinite graph, with finite size corrections later shown to be extremely small \cite{pastor10}.\footnote{In this paper, we will often casually say ``no epidemic threshold'' when we are really referring to epidemic thresholds which vanish rapidly with $N$, the size of the network.} Later, in \cite{pastor2} it was shown that the removal of this threshold also corresponds to faster than linear epidemic growth on such graphs. Many authors have explored various aspects of the dynamics of epidemic spreading. \cite{boguna, pastor3, pastor2, morenoy, gomez} extend analysis of mean field theory, and within this framework \cite{may, newman} discuss the late time behavior of epidemics on scale free graphs, with \cite{marder, noel} introducing some dynamical aspects. \cite{volz, millernote}, closest in spirit to this work, present reductions of the dynamical equations, although their approach is quite different. Mathematicians have used many complicated techniques to obtain information about generalizations of such solutions to more complicated epidemic types \cite{valls, nucci}, but have typically avoided studying the complication of adding an entire network structure.
\cite{hetero} introduces an extension where bipartite graph structure can be reasonably accounted for by mean field theory, leading to a model of sexually transmitted disease (STD) epidemics.
In addition to processes which may be well modeled by the SIR epidemic, there are many others which share the same structure of the SIR epidemic -- irreversible flow from $\mathrm{S}\rightarrow\mathrm{I}\rightarrow\mathrm{R}$. A related example of such a process is that of rumor spreading \cite{moreno1, moreno2}, which is similar to that of SIR epidemic spreading but with a ``death rate" which is proportional to the current number of infected edges. A slightly more complicated version of the model also allows for the infected nodes to die on their own \cite{nekovee}, but the fundamental difference between this model and the epidemic is captured without this term. Other irreversible processes, such as a new model for recommendation spreading in a population \cite{blattner}, are also very similar, even if they do not have an identical $\mathrm{S}\rightarrow\mathrm{I}\rightarrow\mathrm{R}$ structure.
In this paper, we will present mean field dynamical solutions to the following 4 models: the SIR epidemic, the SI epidemic on bipartite graphs, a simplified model of rumor spreading in which only infected edges can induce transitions to the removed state, and the recommendation spreading model. These solutions should, for all intents and purposes, be regarded as exact -- the only approximation that they require is mean field theory, and they allow for reconstruction of all dynamical quantities of interest within the scope of mean field theory (most easily by numerical methods). For each model, the exact solution can be found for arbitrary degree distribution, when written in the form of an integral over a function defined based on the degree distribution of the underlying network. We will typically make some simplifying approximations to reduce the amount of work we have to do in analyzing the theoretical dynamics, but we stress that these approximations can be removed.
There are numerous reasons why the existence of such exact mean field solutions for arbitrary (mean field) networks is helpful. Other exact solutions have typically either focused only on the behavior at very late times \cite{newman}, or focused on very special types of graphs like the nearest neighbor 1D lattice \cite{williams}, or expressed as series solutions, which obscure the physical meaning of the solution \cite{khan}. Most importantly, the exact solution allows one to determine the accuracy of mean field theory, beyond a comparison of scaling behaviors. Furthermore, an exact solution provides dynamical information about the nature of the epidemic away from the fixed points of the dynamics, as well as precise information about the dynamics in regimes where linearized approximations break down, and we will indeed find more precise answers than we have found in the literature. We will present a basic analysis of the resulting equations as well as compare our results to numerical simulations, which are typically quite accurate. For simplicity, we will almost always work with scale free graphs, where the exact solution can be expressed in terms of integrals over incomplete $\Gamma$ functions with well understood properties -- furthermore, such graphs capture the essence of how network structure can dramatically change the qualitative dynamics.
The paper is organized as follows. Section \ref{epsec} discusses the epidemic models, while Section \ref{rusec} describes the rumor spreading models and Section \ref{sad} discusses the recommendation model; Section \ref{concsec} presents a discussion of the work. Numerical results are presented as we discuss the theory.
As this work was being finalized, we discovered a recent series of papers \cite{jc1, jc2, jc3} which discuss modeling variations of the SIR epidemic by reduction of the dynamics to finite sets of ODEs, using a technique somewhat related to ours. The focus of our work is quite different, emphasizing scaling behavior and asymptotic dynamics, as well as applying this technique to models beyond the scope of epidemic spreading.
\section{SIR Epidemics}\label{epsec}
We begin by discussing the exact mean field solutions, and numerical corroborations of these solutions, for the epidemic spreading models. We first discuss the general structure of an ``epidemic like" process, then move on to the SIR epidemic, then describe why the irreversibility is so crucial, and finally discuss the SI STD epidemic.
\subsection{General Overview of Epidemic Processes on Networks}
This section is meant as a brief review of the nature of an epidemic-like process on a network, and the well-versed reader may happily skip it, or skim it to check our notation.
We begin by quickly reviewing what we mean by a network, or graph. An (undirected) graph is a set of vertices $V$, along with a set of edges $E$, with an edge $e\in E$ associated to a pair of vertices: $e=(uv)=(vu)$ with $u,v\in V$. The degree of a vertex (or node) $v$, which we will label $k_v$, is the number of edges in $E$ with one of the ends of the edge being $v$.
The SIR epidemic is a stochastic process defined on such a network. The state space for this stochastic process is given by $\lbrace \mathrm{S}, \mathrm{I}, \mathrm{R}\rbrace^{|V|}$ -- i.e., each node can exist in state S, I or R. In theory, the SIR epidemic is a continuous stochastic process, with the rate of transition between states being defined as follows: if two graph configurations differ by more than 1 node, then no transitions are allowed. If the graphs differ by one node, then the following transitions are allowed: \begin{equation}
\text{for each node } v\in V: \;\;\;\;\; \left\lbrace\begin{array}{l} v: \mathrm{S}\rightarrow\mathrm{I} \text{ with rate } k_v\theta_v \\ v: \mathrm{I}\rightarrow\mathrm{R} \text{ with rate } \lambda \end{array}\right.,
\end{equation}where \begin{equation}
\theta_v \equiv \frac{\text{number of edges which point from }v \text{ to a node in state I}}{k_v}
\end{equation}Note that we have chosen to measure time in units where the rate of transition from S to I is 1, per edge.
The intuition for the above process is straightforward. If a node is an S, it is susceptible to becoming infected, which occurs by an interaction with an infected neighbor. The more infected neighbors the node has, the more likely the node is to catch the infection from one of them -- we assume this rate is linear. We then assume that a node dies with a constant rate once it catches the disease. There are many obvious variations on such a process, although most of them will not be likely to have an exact solution of the type found in this paper. We will consider a few simple processes of this form which do have such exact solutions.
It is well-known that mean field theory is typically a far better approximation to dynamical processes on such networks than on a graph like a hypercubic lattice, as the random structure of the graph, and the large number of edges, mean that the network itself helps to ``average'' over states \cite{barratbook}. In this paper, we will always assume that $|V|\rightarrow\infty$ (the number of nodes is getting infinitely large) -- this is the regime where mean field theory should work best. Mean field theory will treat all nodes with the same $k_v$ as being the same, and so all we will care about is $\rho_k$, the fraction of nodes in $V$ which have $k_v=k$, and $S_k$, $I_k$ and $R_k$, the fraction of nodes which have $k$ edges which are in state S, I, or R respectively. Conservation of probability tells us that \begin{equation}
S_k + I_k+R_k=1
\end{equation} and so we can neglect the dynamics of $R_k$. The other key approximation of mean field theory will be that\begin{equation}
\theta_v = \theta \equiv\left[\sum k\rho_k\right]^{-1} \sum k\rho_k I_k \equiv \frac{\langle kI_k\rangle}{\langle k\rangle}, \label{ourtheta}
\end{equation}where we are using angle brackets to denote averages with respect to the distribution $\rho_k$.\footnote{(\ref{ourtheta}) is a bit simplistic, because since every infected node (other than a starting ``seed" infected node) was infected by contact with some other infected node, in reality an infected node with $k$ edges could at most transmit the infection to $k-1$ other nodes. However, we will only simulate things on graphs where each node has at least 5 or so edges, and this will not turn out to have a very large qualitative, or quantitative, impact on the discussion. It would also be very straightforward to remove this approximation, at the expense of introducing some more terms into the equations.}
\subsection{Solution for Scale Free Graphs}
The mean field equations of the SIR epidemic are easy to write down, given the rules above: \begin{subequations}\begin{align}
\dot{S}_k &= -k\theta S_k, \label{sk} \\
\dot{I}_k &= k\theta S_k - \lambda I_k \label{ik}.
\end{align}\end{subequations}Now, let us reduce this infinite set of dynamical equations, assuming that all nodes in the graph have at least $m$ edges. We begin with (\ref{sk}): \begin{equation}
\frac{\dot{S}_k}{\dot{S}_m} = \frac{\mathrm{d}S_k}{\mathrm{d}S_m} = \frac{-k\theta S_k}{-m\theta S_m} = \frac{k}{m}\frac{S_k}{S_m}.
\end{equation} This can be easily integrated to give, if we assume that $S_k(0)\approx S_m(0)\approx 1$: \begin{equation}
S_k(t) = S_m(t)^{k/m}. \label{skt}
\end{equation}
For later convenience, we will introduce the variable \begin{equation}
z(t) = -\log S_m(t), \label{zsm}
\end{equation}and we find we have reduced (\ref{sk}) to \begin{equation}
\dot{z} = m \theta. \label{dz}
\end{equation}
As we show in Figure \ref{zexpsir}, numerical simulations suggest that (\ref{skt}) becomes very quickly quantitatively true for a decent range of $k$ as soon as the epidemic takes off. We use scale free graphs, with \begin{equation}
\rho_k \sim \Theta(k-m) k^{-\gamma} \label{rhosf}
\end{equation} for simulations for the entirety of this paper, as that is where the dynamics becomes most interesting, and where our mean field solutions will become easier to write down. In all of our simulations, we use $m=10$.\footnote{We checked that this assumption did not lead to any qualitative changes in behavior -- e.g., if $m=5$ or $m=20$, the dynamics are very similar.} To generate quality scale free graphs, we use the preferential attachment algorithms of \cite{krapivsky}.\footnote{Other papers, e.g. \cite{pastor2}, show that the specific algorithm used to generate a scale free graph does not result in any qualitative change to the dynamics, so we will not worry about this point.} For a bit larger $m$, the values of $z$ become significantly higher, but this is a numerical fragment ($-\log 0 = \infty$ -- i.e., all nodes of a given connectivity have been infected or removed), and so we have truncated these unphysical values from our graph.\begin{figure}[here]
\centering
\begin{tikzpicture}
\begin{axis}[width=7cm, height=6cm, xlabel=$k$, ylabel=$-\log S_k\;\;$, ylabel style=sloped like x axis]
\pgfplotstableread{nu35n5000z.txt}\datatable
\addplot[color=violet, mark=*, only marks] table[x index=0, y index=9] from \datatable;
\addplot[color=blue, mark=*, only marks] table[x index=0, y index=12] from \datatable;
\addplot[color=red, mark=*, only marks] table[x index=0, y index=14] from \datatable;
\addplot[color=red, domain=0:40] {0.0194*x};
\addplot[color=blue, domain=0:40] {0.0117*x};
\addplot[color=violet, domain=0:40] {0.005*x};
\end{axis}
\end{tikzpicture}
\caption{$-\log S_k$ as a function of $k$ at various times. We generated scale free graphs with $N=5000$ nodes, degree $\gamma=3.5$, and death rate $\lambda=9$, and averaged over 200 trials.}
\label{zexpsir}
\end{figure}
Now, we turn to (\ref{ik}), and we find an equation for $\dot{\theta}$: \begin{equation}
\dot{\theta} = \sum \frac{k\rho_k \dot{I}_k}{\langle k\rangle} = \sum \frac{\rho_k}{\langle k\rangle} \left[k^2\theta S_k - \lambda k I_k\right] =\left[\sum \frac{k^2}{\langle k\rangle} \rho_k S_k - \lambda \right]\theta. \label{dtheta}
\end{equation}Now, using that $S_k = \mathrm{e}^{-kz/m}$: we find that:\begin{equation}
\frac{\dot{\theta}}{\dot{z}} = \frac{\mathrm{d}\theta}{\mathrm{d}z} = \frac{1}{m\langle k\rangle} \sum k^2\rho_k \mathrm{e}^{-kz/m} -\frac{\lambda}{m},
\end{equation}which implies that\begin{equation}
\theta(z) = \sum \frac{k\rho_k}{\langle k\rangle} \left(1-\mathrm{e}^{-kz/m}\right)-\frac{\lambda}{m}z = 1-\frac{\lambda z}{m} - \sum \frac{k\rho_k}{\langle k\rangle} \mathrm{e}^{-kz/m}
\end{equation}
Now, using (\ref{rhosf}), let us approximate that our graph is scale free. This will turn out to make $\theta(z)$ have (approximately) an exact expression in terms of well-understood functions:
\begin{align}
\theta +\frac{\lambda z}{m}-1 &\approx - \int\limits_m^\infty \mathrm{d}k \frac{(\gamma-1)m^{\gamma-1}}{k^\gamma} \left[\frac{\gamma-1}{\gamma-2}m\right]^{-1} k\mathrm{e}^{-kz/m} \notag \\
&=- (\gamma-2) \int\limits_z^\infty \mathrm{d}x \; \mathrm{e}^{-x} \frac{1}{z}\left(\frac{z}{x}\right)^{\gamma-1} = -(\gamma-2) z^{\gamma-2}\Gamma(2-\gamma ,z)
\end{align}Note that the $\gamma$ and $m$ dependent factors we have introduced are so that the probability distributions integrate to 1. We have also used identities out of \cite{abramowitz}: here $\Gamma(a,z)$ is the upper incomplete $\Gamma$ function. Using another identity we find \begin{equation}
\theta(z) = z^{\gamma-2} \Gamma(3-\gamma,z) + 1 - \mathrm{e}^{-z} - \frac{\lambda}{m}z.
\end{equation}We then find that we have reduced the dynamics, under fairly benign approximations, to a very simple form: \begin{equation}
\dot{z} = mz^{\gamma-2}\Gamma(3-\gamma,z) + m\left(1-\mathrm{e}^{-z}\right) - \lambda z. \label{dzdt}
\end{equation}
We can thus write down the exact mean field solution, (within our mild approximations): \begin{equation}
t = \int\limits_{z(0)}^z \frac{\mathrm{d}z^\prime}{m(1-\mathrm{e}^{-z^\prime} + z^{\prime(\gamma-2)}\Gamma(3-\gamma,z^\prime)) - \lambda z^\prime} \label{exact1}
\end{equation}Note that we require a very small $z(0)$ factor to regularize divergences -- we will discuss the physical consequences of this shortly. The physical meaning of this factor, as the initial condition of the dynamics, is clear. We should also note that by simply replacing the denominator of (\ref{exact1}) with $m \theta(z)$, we have the exact solution for an arbitrary graph.
While we have an exact solution, since it involves an integral, it is easier to just analyze (\ref{dzdt}). It is straightforward to justify by considering the asymptotic behaviors of the various terms that there are at most two fixed points: $z=0$ is always a fixed point, and if it is unstable, there is an absolutely stable fixed point at some finite $z=z^*>0$. To analyze the stability of the $z=0$ fixed point, about which dynamics occur, we re-write (\ref{dzdt}) as $z\rightarrow 0$ \begin{equation}
\dot{z} \approx z\left[m\frac{\Gamma(3-\gamma,z)}{z^{3-\gamma}} + m-\lambda\right].
\end{equation}
Suppose that $\gamma>3$. Using yet another identity from \cite{abramowitz} concerning the small $z$ behavior of the $\Gamma$ function term, we find that \begin{equation}
\dot{z} \approx z \left[\frac{\gamma-2}{\gamma-3}m - \lambda \right]
\end{equation}which implies the existence of an epidemic threshold: \begin{equation}
\lambda_{\mathrm{c}} = \frac{\gamma-2}{\gamma-3}m.
\end{equation}For $\lambda<\lambda_{\mathrm{c}}$, epidemics will not spread, whereas they will for $\lambda>\lambda_{\mathrm{c}}$. Since the fixed point at finite positive $z$ is always absolutely stable, we conclude that for $\lambda\ne\lambda_{\mathrm{c}}$, the dynamics are always linear near fixed points. Since these are the slow points of the dynamics, we conclude that the time scales of the dynamics, the spreading time $\tau_{\mathrm{spread}}$, and the ending time $\tau_{\mathrm{end}}$, should be \begin{equation}
\tau_{\mathrm{spread}} \sim \tau_{\mathrm{end}} \sim \int\limits_{1/N}^{\mathrm{O}(1)}\frac{\mathrm{d}z}{z} \sim \log N.
\end{equation}
Of course, we do not take our precise approximation of $\lambda_{\mathrm{c}}$ too seriously, but the key point is simply that there is an epidemic threshold, and a finite time scale of the epidemic dynamics, when $\gamma>3$. This fact is well known \cite{pastor2}.
Now, let us consider the case where $\gamma< 3$. Here, the $\Gamma$ function ratio is now divergent as $z\rightarrow 0$, and so the dominant term of the dynamics is \begin{equation}
\dot{z} \sim z^{\gamma-2}.
\end{equation}From this we find the spreading time scale is \begin{equation}
\tau_{\mathrm{spread}} \approx \int\limits_{1/N}^{\mathrm{O}(1)} \frac{\mathrm{d}z}{z^{\gamma-2}} \sim \left. z^{3-\gamma}\right|_{1/N}^{\mathrm{O}(1)} = \mathrm{O}(1).
\end{equation}
In the case of $\gamma=3$, we have that $\Gamma(0,z)\sim -\log z$, and so denoting \begin{equation}
y\equiv -\log z,
\end{equation}we find that we can approximate the dynamical equation by \begin{equation}
\dot{y} \approx -y
\end{equation}for large $y$, with initial condition $y_0\sim \log N$. This immediately gives us that \begin{equation}
\tau_{\mathrm{spread}} \sim \log\log N.
\end{equation}
It was argued heuristically, and shown numerically, in \cite{pastor2} that the growth of epidemics was faster than linear for scale free graphs with $\gamma\le 3$. Here, however, we have a more precise claim that the time scale of epidemic spreading is in fact independent of the size of the network (except in the special case $\gamma=3$). We similarly find for this case that $\tau_{\mathrm{end}} \sim \log N$.
Now that we have an exact solution and understand its important properties, the most important question is whether or not we can use the exact solution to actually determine the dynamics of various functions of interest: $S_k(t)$, $I_k(t)$ and $R_k(t)$. Of course, it will suffice to find the first two, and the first follows directly from (\ref{zsm}) and (\ref{skt}). To find $I_k(t)$, we can use the following trick: \begin{equation}
\frac{\mathrm{d}}{\mathrm{d}t} \left(\mathrm{e}^{\lambda t} I_k(t)\right) = k\mathrm{e}^{-kz(t)/m} \theta(z(t)).
\end{equation}Having found $z(t)$, we can recover \begin{equation}
I_k(t) = \int\limits_0^t \mathrm{d}s\; \mathrm{e}^{-\lambda (t-s)} k\mathrm{e}^{-kz(s)/m} \theta(z(s)).
\end{equation}where we have approximated that $I_k(0)\approx 0$. It is likely not possible to do these integrals by hand, but they could be done numerically.
Figure \ref{fig1} compares the equation (\ref{dzdt}), the result of mean field theory, to numerical simulations. We see that the qualitative sketch of the mean field trajectory is reproduced by the simulated dynamics for the range of $N$ tested, but quantitatively the curves appear shifted a bit, which is expected due to some of our approximations. Interestingly, we see that for $\gamma=3.5$, the mean field theory slightly lags behind the simulations, whereas for $\gamma=2.5$, the mean field theory leads the simulated dynamics. This suggests, perhaps, that the sharp transition observed in mean field theory between $\gamma>3$ and $\gamma<3$ is likely not quite as sharp in the actual dynamics on a network.\footnote{Another issue is that $N=2000$ may be far too small to see a difference, but we did not have the computing power available to test this.} \begin{figure}[htbp]
\centering
\begin{tikzpicture}
\begin{axis}[width=7cm, height=5.5cm, xlabel=$t$, ylabel=$z$, ylabel style=sloped like x axis]
\pgfplotstableread{nu35l2.txt}\datatable
\addplot[color=violet, very thick] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{nu35l4.txt}\datatable
\addplot[color=blue, very thick] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{nu35l8.txt}\datatable
\addplot[color=red, very thick] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{testep352.txt}\datatable
\addplot[color=violet!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{testep354.txt}\datatable
\addplot[color=blue!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{testep358.txt}\datatable
\addplot[color=red!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\draw (axis cs: 1.5, 0.3) node[left] {$\gamma=3.5$};
\end{axis}
\begin{scope}[xshift=6.2cm]
\begin{axis}[width=7cm, height=5.5cm, xlabel=$t$]
\pgfplotstableread{nu25l2.txt}\datatable
\addplot[color=violet, very thick] table[x index=0, y index=1] from \datatable;
\addlegendentry{$\lambda=2$};
\pgfplotstableread{nu25l4.txt}\datatable
\addplot[color=blue, very thick] table[x index=0, y index=1] from \datatable;
\addlegendentry{$\lambda=4$};
\pgfplotstableread{nu25l8.txt}\datatable
\addplot[color=red, very thick] table[x index=0, y index=1] from \datatable;
\addlegendentry{$\lambda=8$};
\pgfplotstableread{testep252.txt}\datatable
\addplot[color=violet!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{testep254.txt}\datatable
\addplot[color=blue!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{testep258.txt}\datatable
\addplot[color=red!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\end{axis}
\end{scope}
\end{tikzpicture}
\caption{Comparison of mean field theory prediction for $z(t)$ to numerical simulations. We used $N=2000$, $m=10$ and averaged over 50 trials. The unphysical jumps in $\lambda=2$ dynamics are due to trials where all nodes with $k=10$ became infected.}
\label{fig1}
\end{figure}
It is not hard to understand qualitatively what will happen if we assume that $\rho_k$ does not describe a scale free network. In this case, we will no longer have an explicit form for the answer, but we can still understand the qualitative behavior by studying the quantity \begin{equation}
C(\gamma)\equiv \lim_{k\rightarrow\infty} \left[\frac{1}{k-m-1}\sum_{n=m}^k n^\gamma \rho_n \right].
\end{equation}We can use the divergences in $C(\gamma)$ to bound the dynamics on our given graph by replacing the graph's degree distribution with a non-normalized $\rho_k \sim k^{-\gamma}$, to find bounds in $\dot{z}\sim \theta(z)$. Crudely speaking $C(\gamma) \sim k^\gamma \rho_k$ for large $k$, but to take care of a network where some of the $\rho_k$ may be 0, we will use the above definition. If $C(3-\epsilon)=\infty$ for some $\epsilon>0$, then we conclude that $\tau_{\mathrm{spread}}\sim \mathrm{O}(1)$. The case where $C(3)<\infty$ but $C(3+\epsilon)=\infty$ for any $\epsilon>0$ implies that $\tau_{\mathrm{spread}}\sim \log \log N$, which we obtain by bounding the spread time both from below and above by bounding $\theta(z)$ by two scale free distributions of degree $\gamma=3$. If $C(3+\epsilon)<\infty$, then we conclude that $\tau_{\mathrm{spread}} \sim \log N$. For this last case, there is an epidemic threshold independent of $N$, while for the former cases, there is only an epidemic threshold vanishing as $N\rightarrow\infty$.
\subsection{SIS Epidemic?}
A natural question to ask, given our success with mean field theory above, is whether or not we can do something for the SIS epidemic. In the SIS epidemic, instead of dying (transitioning to state R), nodes transition to state S with rate $\lambda$. The mean field equations in this case are given by \cite{pastor}:\begin{equation}
\dot{I}_k = k\theta (1-I_k) - \lambda I_k.
\end{equation}
Numerous problems arise in this case. One of the major problems is that since it is possible to become susceptible again, we do not have the simple reduction of the S dynamics to a single equation. The second, critical, problem is that $\dot{\theta}$ is not proportional to $\theta$ -- instead, we get a ``tower" of dynamical equations for the probability of looking at an infected node weighted by $k^2$, $k^3$, etc. This implies that the irreversibility of the SIR epidemic is crucial for the exact solutions found above.
\subsection{STD Epidemics on Scale Free Bipartite Graphs}
A natural extension of the above discussion is the STD epidemic model on bipartite scale free graphs, as introduced in \cite{hetero}.\footnote{Actually, this paper considered the SIS epidemic. But as we just mentioned, the SIS epidemic does not have a nice solution -- at least not using our techniques.} The basic idea of this model is that there are two networks, a ``male" network and a ``female" network, such that all edges are between a male and female. The mean field theory we used in the previous parts would be a bad approximation here, because we do have two distinct types of nodes, but at the expense of doubling the number of dynamical variables to $S_{\mathrm{M}k}$, $S_{\mathrm{F}k}$, $I_{\mathrm{M}k}$ and $I_{\mathrm{F}k}$, referring to the probability that a male/female node is susceptible and male/female node is infected respectively, we can correct for this. For simplicity, let us assume that the male graph is scale free of degree $\gamma_{\mathrm{M}}$, and the female graph is scale free of degree $\gamma_{\mathrm{F}}$. The extension of the mean field equations above is straightforward:\footnote{We assume that the rates are not M/F dependent, for simplicity, as was done in \cite{hetero}.} \begin{subequations}\begin{align}
\dot{S}_{\mathrm{F}k} &= -k\theta_{\mathrm{M}}S_{\mathrm{F}k}, \\
\dot{S}_{\mathrm{M}k} &= -k\theta_{\mathrm{F}}S_{\mathrm{M}k}, \\
\dot{I}_{\mathrm{F}k} &= k\theta_{\mathrm{M}}S_{\mathrm{F}k} - \lambda I_{\mathrm{F}k} ,\\
\dot{I}_{\mathrm{M}k} &= k\theta_{\mathrm{F}}S_{\mathrm{M}k} - \lambda I_{\mathrm{M}k},
\end{align}\end{subequations}with $\theta_{\mathrm{F}}$ and $\theta_{\mathrm{M}}$ defined in the same way as before:\begin{subequations}\begin{align}
\theta_{\mathrm{F}} &= \frac{1}{\langle k\rangle_{\mathrm{F}}} \sum k\rho_{\mathrm{F}k} I_{\mathrm{F}k} ,\\
\theta_{\mathrm{F}} &= \frac{1}{\langle k\rangle_{\mathrm{M}}} \sum k\rho_{\mathrm{M}k} I_{\mathrm{M}k}.
\end{align}\end{subequations} By defining $z_{\mathrm{F}}$ and $z_{\mathrm{M}}$ as before: \begin{subequations}\begin{align}
z_{\mathrm{F}} &\equiv -\log S_{\mathrm{F}m}, \\
z_{\mathrm{M}} &\equiv -\log S_{\mathrm{M}m},
\end{align}\end{subequations}we find, using the same tricks as above,\begin{subequations}\begin{align}
\dot{z}_{\mathrm{F}} &= m \theta_{\mathrm{M}}, \\
\dot{z}_{\mathrm{M}} &= m\theta_{\mathrm{F}}, \\
\dot{\theta}_{\mathrm{F}} &= (\gamma_{\mathrm{F}}-2)mz^{\gamma_{\mathrm{F}}-3}\Gamma(3-\gamma_{\mathrm{F}},z_{\mathrm{F}})\theta_{\mathrm{M}} - \lambda\theta_{\mathrm{F}}, \\
\dot{\theta}_{\mathrm{M}} &= (\gamma_{\mathrm{M}}-2)mz^{\gamma_{\mathrm{M}}-3}\Gamma(3-\gamma_{\mathrm{M}},z_{\mathrm{M}})\theta_{\mathrm{F}} - \lambda\theta_{\mathrm{M}}.
\end{align}\end{subequations}
We have not found a way to solve these equations nearly exactly. The difficulty comes in via the mixing of $\theta_{\mathrm{F}}$ and $\theta_{\mathrm{M}}$, which render the division trick we used earlier useless. However, we can solve a simplified version of the model. Consider the case where $\lambda=0$ -- this should be a decent approximation to the case where $\lambda \ll 1$ anyways (so the epidemic spreads very rapidly), and should give us qualitative insight into the nature of spreading. In this case, we can once again employ the division trick, and we find that, just as before, using identities out of \cite{abramowitz}: \begin{subequations}\label{eq26ab}\begin{align}
\theta_{\mathrm{F}}(z_{\mathrm{F}}) &= z^{\gamma_{\mathrm{F}}-2}_{\mathrm{F}}\Gamma(3-\gamma_{\mathrm{F}},z_{\mathrm{F}}) + 1-\mathrm{e}^{-z_{\mathrm{F}}} = 1-(\gamma_{\mathrm{F}}-2)z^{\gamma_{\mathrm{F}}-2}\Gamma(2-\gamma_{\mathrm{F}},z), \\
\theta_{\mathrm{M}}(z_{\mathrm{M}}) &= z^{\gamma_{\mathrm{M}}-2}_{\mathrm{M}}\Gamma(3-\gamma_{\mathrm{M}},z_{\mathrm{M}}) + 1-\mathrm{e}^{-z_{\mathrm{M}}} = 1-(\gamma_{\mathrm{M}}-2)z^{\gamma_{\mathrm{M}}-2}\Gamma(2-\gamma_{\mathrm{M}},z).
\end{align}\end{subequations}Now, we use that \begin{equation}
\frac{\dot{z}_{\mathrm{F}}}{\dot{z}_{\mathrm{M}}} = \frac{\mathrm{d}z_{\mathrm{F}}}{\mathrm{d}z_{\mathrm{M}}} = \frac{\theta_{\mathrm{M}}}{\theta_{\mathrm{F}}}
\end{equation}to find that \begin{equation}
F(z_{\mathrm{F}};\gamma_{\mathrm{F}}) = F(z_{\mathrm{M}};\gamma_{\mathrm{M}}) \label{fzf}
\end{equation} where \begin{equation}
F(z) = \int\limits_0^z \mathrm{d}z^\prime \theta(z^\prime).
\end{equation}
Returning to our assumption that the graphs are scale free:\begin{equation}
F(z;\gamma) \equiv z -\frac{\gamma-2}{\gamma-1}\left[1-\mathrm{e}^{-z} + z^{\gamma-1}\Gamma(2-\gamma,z)\right].
\end{equation}
Now, to understand (\ref{fzf}) in the regime of interest (for small $z$), we perform asymptotic expansions on $F$. We find that the lowest order non vanishing terms are given by \begin{equation}
F(z;\gamma) \approx \left\lbrace\begin{array}{ll} \displaystyle \dfrac{\gamma-2}{2(\gamma-3)} z^2&\ \gamma > 3 \\ \displaystyle \dfrac{z^2}{2}\log\dfrac{1}{z} &\ \gamma=3 \\ \displaystyle \dfrac{\Gamma(3-\gamma)}{\gamma-1}z^{\gamma-1} &\ 2<\gamma<3 \end{array}\right..
\end{equation}
Let us look at a few examples of what this implies about the dynamics as the epidemic gets started. Suppose that $\gamma_{\mathrm{F}}>3$ and $\gamma_{\mathrm{M}} > 3$. It is easy to see that (\ref{fzf}) implies that \begin{equation}
z_{\mathrm{F}} \approx \sqrt{ \frac{(\gamma_{\mathrm{F}}-3)(\gamma_{\mathrm{M}}-2)}{(\gamma_{\mathrm{F}}-2)(\gamma_{\mathrm{M}}-3)}} z_{\mathrm{M}}, \label{zfzm1}
\end{equation}or\begin{equation}
S_{\mathrm{F}k} \approx S_{\mathrm{M}k}^{\sqrt{(\gamma_{\mathrm{F}}-3)(\gamma_{\mathrm{M}}-2)/(\gamma_{\mathrm{F}}-2)(\gamma_{\mathrm{M}}-3)}}.
\end{equation}We should not take the precise exponent here particularly seriously, but just note that the fraction of male susceptible nodes is some power of the fraction of female susceptible nodes. Now, let us consider the case where $\gamma_{\mathrm{F}}>3$ but $\gamma_{\mathrm{M}}<3$. Then we find \begin{equation}
z_{\mathrm{F}} = \sqrt{\frac{2(\gamma_{\mathrm{F}}-3)\Gamma(3-\gamma_{\mathrm{M}})}{(\gamma_{\mathrm{F}}-2)(\gamma_{\mathrm{M}}-1)}}z_{\mathrm{M}}^{(\gamma_{\mathrm{M}}-1)/2}. \label{zfzm2}
\end{equation}This is a surprising result -- for very small $t$, the female nodes get infected at a rate more than exponentially faster than the male nodes, although this range of times is not very long.
We can also see quickly that a similar result for $\tau_{\mathrm{spread}}$ holds: if $\gamma_{\mathrm{M}},\gamma_{\mathrm{F}} >3$, the spreading dynamics are $\mathrm{O}(\log N)$; they are $\mathrm{O}(1)$ in the case of $\gamma_{\mathrm{M}}<3$. In the case of $\gamma_{\mathrm{M}},\gamma_{\mathrm{F}}>3$, this follows from (\ref{zfzm1}) and (\ref{eq26ab}): \begin{equation}
\dot{z}_{\mathrm{F}} = m\theta_{\mathrm{M}} \approx m\frac{\gamma_{\mathrm{M}}-2}{\gamma_{\mathrm{M}}-3}z_{\mathrm{M}} = m\sqrt{\frac{(\gamma_{\mathrm{M}}-2)(\gamma_{\mathrm{F}}-2)}{(\gamma_{\mathrm{M}}-3)(\gamma_{\mathrm{F}}-3)}}z_{\mathrm{F}}.
\end{equation}and similarly for $z_{\mathrm{M}}$. In the case of $\gamma_{\mathrm{M}}<3$, $\gamma_{\mathrm{F}}>3$, we have instead, using (\ref{zfzm2}) and (\ref{eq26ab}):\begin{equation}
\dot{z}_{\mathrm{F}} \sim \theta_{\mathrm{M}} \sim z_{\mathrm{M}}^{\gamma_{\mathrm{M}}-2} \sim z_{\mathrm{F}}^{2(\gamma_{\mathrm{M}}-2)/(\gamma_{\mathrm{M}}-1)}.
\end{equation}Since \begin{equation}
0 < 2\frac{\gamma_{\mathrm{M}}-2}{\gamma_{\mathrm{M}}-1} < 1 \;\;\;\;\; (2<\gamma_{\mathrm{M}}<3)
\end{equation}we conclude that growth is faster than linear, and that the spreading dynamics is O(1) for the same reason as in the SIR epidemic. In the case of $\gamma_{\mathrm{F}}>3$, $\gamma_{\mathrm{M}}=3$, we find that since $z_{\mathrm{F}}^2 \sim -z_{\mathrm{M}}^2 \log z_{\mathrm{M}}$, that \begin{equation}
\dot{z}_{\mathrm{F}} = z_{\mathrm{M}} \log \frac{1}{z_{\mathrm{M}}} \approx z_{\mathrm{F}} \sqrt{\log \frac{1}{z_{\mathrm{F}}} + \mathrm{O}(\log\log z_{\mathrm{F}})}
\end{equation} Defining $y_{\mathrm{F}}=-\log z_{\mathrm{F}}$ as we did earlier, we find that \begin{equation}
\tau_{\mathrm{spread}} \sim\int \frac{\mathrm{d}y_{\mathrm{F}}}{\sqrt{y_{\mathrm{F}}}} \sim \sqrt{\log N}
\end{equation} In the case of $\gamma_{\mathrm{M}}=\gamma_{\mathrm{F}}=3$, we can find that $\tau_{\mathrm{spread}}\sim \log\log N$ just as before.
To generate bipartite scale free networks for use in simulations, we used a similar algorithm to what is used in \cite{hetero}, which unfortunately does not guarantee that all F nodes have at least 10 edges. However, we see that this does not significantly affect the dynamics, and they match mean field theory extremely well, as shown in Figure \ref{zexp5}, although they are a bit lower than mean field theory would predict in the range of validity. Figure \ref{zexp} shows that the fraction of susceptible nodes (for both M and F) is exponentially decaying with $k$, as mean field theory predicts. Together, these suggest that mean field theory is a valid dynamical approximation at all times, notwithstanding finite size limitations.
\begin{figure}[htbp]
\centering
\begin{tikzpicture}
\begin{axis}[width=10cm, height=6cm, xlabel=$k$, ylabel=$-\log S_k$, ylabel style=sloped like x axis]
\pgfplotstableread{STD25.txt}\datatable
\addplot[color=blue, mark=square, only marks] table[x index=0, y index=12] from \datatable;
\addlegendentry{female, $\gamma_{\mathrm{M}}=2.5$}
\addplot[color=blue, mark=*, only marks] table[x index=0, y index=11] from \datatable;
\addlegendentry{male, $\gamma_{\mathrm{M}}=2.5$}
\pgfplotstableread{STD35.txt}\datatable
\addplot[color=red, mark=square, only marks] table[x index=0, y index=12] from \datatable;
\addlegendentry{female, $\gamma_{\mathrm{M}}=3.5$}
\addplot[color=red, mark=*, only marks] table[x index=0, y index=11] from \datatable;
\addlegendentry{male, $\gamma_{\mathrm{M}}=3.5$}
\addplot[color=red, domain=0:33] {0.027*x};
\addplot[color=red, domain=0:33] {0.0105*x};
\addplot[color=red, mark=square, only marks] table[x index=0, y index=16] from \datatable;
\addplot[color=red, mark=*, only marks] table[x index=0, y index=15] from \datatable;
\addplot[color=red, domain=0:33] {0.0074*x};
\addplot[color=red, domain=0:33] {0.034*x};
\addplot[color=blue, domain=0:30] {0.083*x};
\addplot[color=blue, domain=0:30] {0.05*x};
\end{axis}
\end{tikzpicture}
\caption{$-\log S_{\mathrm{M}k}$ and $-\log S_{\mathrm{F}k}$ on SI STD epidemics on graphs with $N=5000$ nodes and $\gamma_{\mathrm{F}}=3.5$, averaged over 400 trials. We used times $t=0.24$ and $0.32$ for $\gamma_{\mathrm{M}}=3.5$, and $0.24$ for $\gamma_{\mathrm{M}}=2.5$, to avoid finite size effects (which become visible for the blue lines), as discussed earlier. We have checked that other parameters lead to similar linear relations.}
\label{zexp}
\end{figure}
\begin{figure}[htbp]
\centering
\begin{tikzpicture}
\begin{axis}[width=7cm, height=5.5cm, xlabel=$t$, ylabel=$z$, ylabel style=sloped like x axis]
\pgfplotstableread{nu35rec.txt}\datatable
\addplot[color=black, very thick] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{testSTD35.txt}\datatable
\addplot[color=blue, very thick, dotted] table[x index=0, y index=1] from \datatable;
\addplot[color=pink, very thick, dotted] table[x index=0, y index=2] from \datatable;
\draw (axis cs: 0.6, 0) node[right] {$\gamma_{\mathrm{M}}=3.5$};
\end{axis}
\begin{scope}[xshift=6.2cm]
\begin{axis}[width=7cm, height=5.5cm, xlabel=$t$]
\pgfplotstableread{nu25stdM.txt}\datatable
\addplot[color=blue, very thick] table[x index=0, y index=1] from \datatable;
\addlegendentry{male}
\pgfplotstableread{nu25stdF.txt}\datatable
\addplot[color=pink!80!black, very thick] table[x index=0, y index=1] from \datatable;
\addlegendentry{female}
\pgfplotstableread{testSTD25.txt}\datatable
\addplot[color=blue!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\addplot[color=pink, very thick, dotted] table[x index=0, y index=2] from \datatable;
\draw (axis cs: 0.6, 0) node[right] {$\gamma_{\mathrm{M}}=2.5$};
\end{axis}
\end{scope}
\end{tikzpicture}
\caption{Comparison of $z(t)$ between theory (solid line) and simulations (dotted line) for the SI STD epidemic model. We used $N=2000$, $m=10$, and 100 trials.}
\label{zexp5}
\end{figure}
\section{Rumor Spreading} \label{rusec}
Now, let us turn the discussion to models of rumor spreading. The essential idea of the rumor spreading model is that people can be described as being in one of three states: unaware of the rumor (state S), actively spreading the rumor (state I), or aware of the rumor but no longer spreading it (state R). The key difference with the SIR epidemic is that the death rates will now change.
There are two possibilities. The classic rumor spreading model, which we will denote ``type IR'' rumor spreading, corresponds to a situation where every edge that connects a given node in state I to a node in either state I or R induces transitions to R with rate $\lambda$. We will instead consider a simplified version, which we denote ``type I'' rumor spreading, where only I nodes induce such transitions. Type I rumor spreading is perhaps not as realistic as type IR, for dynamical reasons which will become clear, but it will admit an exact solution of the same type as we have found before, so we will focus our discussion on this model. First, we begin by discussing type IR rumor spreading, and describe what can be obtained from mean field theory.
\subsection{Type IR Rumor Spreading}
Let us define\begin{equation}
\psi = \sum \frac{k\rho_k S_k}{\langle k\rangle}.
\end{equation} The mean field equations are\begin{subequations}\begin{align}
\dot{S}_k &= -k\theta S_k, \\
\dot{I}_k &= k\theta S_k - \lambda k (1-\psi) I_k. \label{ik2}
\end{align}\end{subequations}
We will not find a way to solve the above equations nearly exactly, even for a scale free graph. Furthermore, essentially all of the results we find in this section can be found in \cite{nekovee}, but we repeat them here for completeness, and because we derive them in a slightly quicker way. We begin by noting that introducing $z$ as we did before, we find the exact same relation that $S_k = \mathrm{e}^{-kz/m}$. In particular, this means that (once again, for simplicity, assuming $\rho_k\sim k^{-\gamma}$)\begin{equation}
\psi(z) = \sum \frac{k\rho_k}{\langle k\rangle} S_k = \sum \frac{k\rho_k}{\langle k\rangle} \mathrm{e}^{-kz/m} \approx \frac{\gamma -1}{\langle k\rangle} \int\limits_m^\infty \mathrm{d}k \left(\frac{m}{k}\right)^{\gamma-1} \mathrm{e}^{-kz/m} = (\gamma-2)z^{\gamma-2}\Gamma(2-\gamma,z).
\end{equation}In general, we can find an expression for $\psi(z)$ for more complicated degree distributions, but we may not be able to find the exact solution. Given $\psi(z)$, (\ref{ik2}) becomes \begin{equation}
\dot{I}_k = k\theta \mathrm{e}^{-kz/m} - \lambda k(1-\psi(z))I_k.
\end{equation}
Unfortunately, it is far from obvious how to solve these differential equations exactly. Although they are linear in $I$, they involve diagonalizing a nontrivial infinite dimensional matrix. We will content ourselves to merely understanding the location of the fixed point $z^*$. To find $z^*$, we note that \begin{equation}
\frac{\mathrm{d}}{\mathrm{d}t} \langle I\rangle = \sum \rho_k\dot{I}_k = \sum k\rho_k \mathrm{e}^{-kz/m}\theta - \lambda(1-\psi(z))\sum k\rho_k I_k = \langle k\rangle [ \theta \psi(z) - \lambda\theta(1-\psi(z))].
\end{equation}At $t\rightarrow\infty$, this should go to 0, so we conclude that \begin{equation}
\psi(z^*) = \frac{\lambda}{\lambda+1}.
\end{equation}
We can say more about the state of the graph at the fixed point: the mean field theory clearly predicts that $S_k(\infty)$ decreases exponentially with $k$. This fact was already observed in \cite{moreno2}, but a theoretical explanation was not known.
Since the focus of this paper is on discovering exact solutions, let us now turn to type I rumor spreading, which we will discover does have an exact solution.
\subsection{Type I Rumor Spreading}
Let us now turn to the simplified model of type I rumor spreading, with mean field equations \begin{subequations}\begin{align}
\dot{S}_k &= -k\theta S_k, \\
\dot{I}_k &= k\theta S_k - \lambda k\theta I_k.
\end{align}\end{subequations}It is clear that $S_k = \mathrm{e}^{-kz/m}$ as before. We now may exploit a different trick than the one we have previously used. Consider \begin{equation}
\frac{\dot{I}_k}{\dot{S}_k} = \frac{\mathrm{d}I_k}{\mathrm{d}S_k} = -1 + \lambda \frac{I_k}{S_k}. \label{eq26}
\end{equation}Then we see that by defining \begin{equation}
w_k S_k = I_k,
\end{equation}(\ref{eq26}) becomes, assuming for simplicity that $\lambda\ne 1$,\footnote{The case of $\lambda=1$ is not difficult to solve, but we do not present it in this paper.} \begin{equation}
S_k \frac{\mathrm{d}w_k}{\mathrm{d}S_k} = -1 + (\lambda-1)w_k,
\end{equation}which for appropriate initial conditions, implies \begin{equation}
\frac{1}{\lambda-1} \log \frac{(\lambda-1)w_k - 1}{-1} = \log S_k,
\end{equation}or \begin{equation}
I_k = \frac{1}{1-\lambda} \left(S_k^\lambda - S_k\right) = \frac{\mathrm{e}^{-\lambda kz/m} - \mathrm{e}^{-kz/m}}{1-\lambda}. \label{iksk}
\end{equation}
Now, from here, we can directly compute $\theta(z)$. As we expect, $\theta(z)$ has an explicit expression for a scale free graph under the sum to integral approximation: \begin{equation}
\theta(z) \approx \int\limits_m^\infty \frac{\gamma-2}{m} \mathrm{d}k \left(\frac{m}{k}\right)^{\gamma-1} \frac{\mathrm{e}^{-\lambda kz/m} - \mathrm{e}^{-kz/m}}{1-\lambda} = \frac{\gamma-2}{1-\lambda}\left[(\lambda z)^{\gamma-2}\Gamma(2-\gamma,\lambda z) - z^{\gamma-2}\Gamma(2-\gamma,z)\right]
\end{equation}and therefore obtain \begin{equation}
\dot{z} = m\theta = \frac{\gamma-2}{1-\lambda} m \left[(\lambda z)^{\gamma-2}\Gamma(2-\gamma,\lambda z) - z^{\gamma-2}\Gamma(2-\gamma,z)\right].
\end{equation}Using $\Gamma$ function identities we can re-write this expression: \begin{equation}
\dot{z} = m\frac{\mathrm{e}^{-\lambda z} - \mathrm{e}^{-z} - (\lambda z)^{\gamma-2} \Gamma(3-\gamma,\lambda z)+z^{\gamma-2}\Gamma(3-\gamma,z)}{1-\lambda}
\end{equation}
Just as before, we can find the exact solution by finding $t$ in terms of $z$, expressed as an integral. Interestingly, we should note that for type I rumor spreading it is actually far easier to extract the relevant physical information: $S_k$ and $I_k$, than for the SIR epidemic. Determining $S_k$ is the same as for the epidemics, but this time we can simply read off $I_k$ from (\ref{iksk}).
Let's analyze the behavior of this equation for small $z$. When $\gamma>3$, we use the asymptotic expansions for $z\approx 0$: \begin{equation}
\dot{z}\approx m\frac{-\lambda z + z +(3-\gamma)^{-1}(\lambda z - z)}{1-\lambda} = \frac{\gamma-2}{\gamma -3 }mz,
\end{equation}which is precisely what we would have found had we naively assumed that the short time behavior of the rumor spreading was behaving like a SIR epidemic with effective death rate of 0. Our intuition thus implies that we should have expected the absence of an epidemic threshold, which is indeed what we see. However, the intuition of approximating rumor spreading as an epidemic fails for the case of $\gamma <3$, interestingly, where the dominant asymptotic behavior near the origin comes exclusively from the $\Gamma$ functions: \begin{equation}
\dot{z}\approx m\Gamma(3-\gamma)\frac{1-\lambda^{\gamma-2}}{1-\lambda} z^{\gamma-2}. \label{lambdab1}
\end{equation}Here, interestingly, we see that the death rate has an effect on the short time dynamics even for small $z$: the $\lambda$ dependent factor behaves like $1$ for $\lambda \ll 1$, and $\lambda^{-(3-\gamma)}$ for $\lambda\gg 1$ (as expected, higher death rates suppress the growth of the epidemic). We also note that it is obvious from here that $\tau_{\mathrm{spread}}$ has the same scaling behavior as with the SIR epidemic: $\mathrm{O}(\log N)$ when $\gamma>3$, $\mathrm{O}(\log\log N)$ when $\gamma=3$, and $\mathrm{O}(1)$ for $\gamma< 3$.
Figure \ref{zexp3} shows plots of the simulated rumor spreading, compared to mean field theory. We see that at initial times, mean field theory is an excellent approximation, although it begins to significantly break down at large $z$. The reason for this will be explained in the next subsection. Figure \ref{zexp2} shows that $S_k$ is still exponentially decaying with $k$ for the rumor spreading models. While for earlier times, the optimal linear fit requires a nonzero intercept with the $z$ axis, the qualitative picture of mean field theory holds very well. \begin{figure}[htbp]
\centering
\begin{tikzpicture}
\begin{axis}[width=7cm, height=5.5cm, xlabel=$t$, ylabel=$z$, ylabel style=sloped like x axis]
\pgfplotstableread{2nu354.txt}\datatable
\addplot[color=red, very thick] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{2nu352.txt}\datatable
\addplot[color=orange, very thick] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{2nu3505.txt}\datatable
\addplot[color=blue, very thick] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{2nu35025.txt}\datatable
\addplot[color=violet, very thick] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{test354.txt}\datatable
\addplot[color=red!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{test352.txt}\datatable
\addplot[color=orange!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{test3505.txt}\datatable
\addplot[color=blue!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{test35025.txt}\datatable
\addplot[color=violet!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\draw (axis cs: 0, 4) node[right] {$\gamma=3.5$};
\end{axis}
\begin{scope}[xshift=6.2cm]
\begin{axis}[width=7cm, height=5.5cm, xlabel=$t$]
\pgfplotstableread{2nu25l4.txt}\datatable
\addplot[color=red, very thick] table[x index=0, y index=1] from \datatable;
\addlegendentry{$\lambda=4$};
\pgfplotstableread{2nu25l2.txt}\datatable
\addplot[color=orange, very thick] table[x index=0, y index=1] from \datatable;
\addlegendentry{$\lambda=2$};
\pgfplotstableread{2nu25l05.txt}\datatable
\addplot[color=blue, very thick] table[x index=0, y index=1] from \datatable;
\addlegendentry{$\lambda=0.5$};
\pgfplotstableread{2nu25l025.txt}\datatable
\addplot[color=violet, very thick] table[x index=0, y index=1] from \datatable;
\addlegendentry{$\lambda=0.25$};
\pgfplotstableread{test254.txt}\datatable
\addplot[color=red!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{test252.txt}\datatable
\addplot[color=orange!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{test2505.txt}\datatable
\addplot[color=blue!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{test25025.txt}\datatable
\addplot[color=violet!60!white, very thick, dotted] table[x index=0, y index=1] from \datatable;
\draw (axis cs: 0, 4) node[right] {$\gamma=2.5$};
\end{axis}
\end{scope}
\end{tikzpicture}
\caption{Comparison of $z(t)$ between theory (solid line) and simulations (dotted line) for type I rumor spreading. We used $N=2000$, $m=10$ and averaged over 50 trials. It required a time step of $\Delta t \approx 0.01$ before the simulation appeared to accurately reflect continuous time dynamics.}
\label{zexp3}
\end{figure}
\begin{figure}[htbp]
\centering
\begin{tikzpicture}
\begin{axis}[width=7cm, height=6cm, xlabel=$k$, ylabel=$-\log S_k \;\;$, ylabel style=sloped like x axis]
\pgfplotstableread{Rnu35n5000z2.txt}\datatable
\addplot[color=violet, mark=*, only marks] table[x index=0, y index=10] from \datatable;
\addplot[color=blue, mark=*, only marks] table[x index=0, y index=17] from \datatable;
\addplot[color=red, mark=*, only marks] table[x index=0, y index=25] from \datatable;
\addplot[color=red, domain=0:30] {0.0794*x};
\addplot[color=blue, domain=0:30] {0.0565*x};
\addplot[color=violet, domain=0:30] {0.03*x};
\end{axis}
\end{tikzpicture}
\caption{$-\log S_k$ as a function of $k$ at various times. Here we show the example of growth on a scale free graph of degree $\gamma=3.5$ with $N=5000$ nodes and death rate $\lambda=4$, averaged over 200 trials.}
\label{zexp2}
\end{figure}
\subsection{Late Time Type I Dynamics}
The above discussion focuses on the early time dynamics. For late times, we will see that type I rumor spreading is a simple example of a process where we should expect mean field theory to completely break down, something which we observed in Figure \ref{zexp3}.
Let us begin by naively assuming that mean field theory is an accurate description, and see what we find. Proceeding as before:\begin{equation}
\dot{z} \approx \frac{\gamma-2}{1-\lambda}m\left[\frac{\mathrm{e}^{-\lambda z}}{\lambda z} - \frac{\mathrm{e}^{-z}}{z}\right] \approx \frac{(\gamma-2)m}{|1-\lambda|} \frac{\mathrm{e}^{-\min(1,\lambda)z}}{\min(1,\lambda)z}
\end{equation}
This implies that, letting $\Lambda=\min(1,\lambda)$, \begin{equation}
\tau_{\mathrm{end}} \sim \int\limits_{\mathrm{O}(1)}^{z^*}\mathrm{d}z \; \Lambda z \mathrm{e}^{\Lambda z} \sim \Lambda z^* \mathrm{e}^{\Lambda z^*}.
\end{equation}Now, we have to be careful about $z^*$. In the type I rumor spreading, once all of an infected node's neighbors die, it will stay infected forever. Suppose we are on a fully connected graph -- then it is clear that $z^* = -\log(1/N) = \log N$, and thus \begin{equation}
\tau_{\mathrm{end}} \sim N^\Lambda \log N.
\end{equation}
This is a very interesting and strange result -- the time scale itself of the epidemic ending is extremely sensitive to the parameters of the problem, until the critical point when $\lambda= 1$, in which case, roughly speaking, the epidemic spreads by pairs becoming infected, with one of the two quickly dying off.
This expression for $\tau_{\mathrm{end}}$ is completely incorrect, however, for a graph which is not fully connected. Here, it becomes a little bit subtle to determine the correct $z^*$. The basic intuition we have proceeds as follows. Typically, the more connected a node was, the more likely it was to have gotten infected early, and to have died quickly. Therefore, the nodes which survive are the ones with fewer connections. Now, let us consider for simplicity, only the nodes which have on the order of the fewest connections, $m$. If we choose a node to ``live'' and kill all of its neighbors, repeating this process until we have saved or killed all nodes, then, since we expect to kill $\sim m$ nodes each time, we should expect that $s_m \sim m^{-1}$, or $z^*\sim \log m$.
However, if the dynamics is driven to a fixed point at $z^*\sim \log m$, then we know that the mean field theory description must have completely broken down, since there is no fixed point for finite $z$. The naive guess is that since the fixed point occurs at $z^*=\mathrm{O}(1)$, the fixed point is absolutely stable, and therefore $\tau_{\mathrm{end}}\sim \log N$. We can qualitatively see this result holds up against numerical simulations, shown in Figure \ref{endt}. Interestingly, we see that the dynamics ends fastest when $\lambda\approx 1$, and becomes slower both for large and small $\lambda$. This has an intuitive interpretation -- for $\lambda\ll 1$, the ending dynamics is slow because we are waiting for death events, which take a very long time; for $\lambda\gg 1$, the ending dynamics is slow because deaths occur so fast that the rumor/infection must propagate ``one node at a time'' with a creation of an I-I edge quickly followed by one of the two dying.
\begin{figure}[htbp]
\centering
\begin{tikzpicture}
\begin{semilogxaxis}[width=10cm, height=7cm, xlabel=$N$ (logarithmic plot), xtick={100, 200, 400, 800, 1600, 3200, 6400}, xticklabels={100, 200, 400, 800, 1600, 3200, 6400}, ylabel=$\tau_{\mathrm{end}}$, ylabel style=sloped like x axis]
\addplot[color=red, only marks, mark=*] coordinates {(100, 3.4104) (200, 4.05) (400, 5.01) (800, 5.61) (1600, 6.11) (3200, 6.85) (6400, 7.83)};
\addlegendentry{$\lambda=0.3$};
\addplot[color=orange, only marks, mark=*] coordinates {(100, 2.13) (200, 2.97) (400, 3.07) (800, 3.6) (1600, 3.843) (3200, 4.41) (6400, 4.8)};
\addlegendentry{$\lambda=0.6$};
\addplot[color=green, only marks, mark=*] coordinates { (100, 1.761) (200, 2.232) (400, 2.58) (800, 3.33) (1600, 3.55) (3200, 4.26) (6400, 4.68)};
\addlegendentry{$\lambda=1$};
\addplot[color=blue, only marks, mark=*] coordinates {(100, 2.147) (200, 2.52) (400, 3.15) (800, 3.87) (1600, 3.96) (3200, 4.83) (6400, 5.25) };
\addlegendentry{$\lambda=2$};
\addplot[color=violet, only marks, mark=*] coordinates { (100, 2.76) (200, 3.66) (400, 3.93) (800, 4.5) (1600, 5.46) (3200, 5.97) (6400, 6.9)};
\addlegendentry{$\lambda=5$};
\addplot[color=red, domain=100:6400] {1.03*ln(x)-1.32};
\addplot[color=orange, domain=100:6400] {0.6*ln(x)-0.47};
\addplot[color=green, domain=100:6400] {0.71*ln(x)-1.54};
\addplot[color=blue, domain=100:6400] {0.76*ln(x)-1.4};
\addplot[color=violet, domain=100:6400] {0.96*ln(x)-1.655};
\end{semilogxaxis}
\end{tikzpicture}
\caption{The ending time, averaged over 50 trials, on scale free graphs with $\gamma=3.5$. We can see that $\tau_{\mathrm{end}} \sim \log N$. To speed up simulations, we used fairly large time steps -- we do not think this should alter the qualitative nature of the end time dynamics, although this may make our simulated $\tau_{\mathrm{end}}$ too small.}
\label{endt}
\end{figure}
\section{Recommendation Spreading}\label{sad}
We now show that a very recently proposed model for recommendation in social systems \cite{blattner} also has an exact solution in terms of an integral, just as we found above. In this model, there are 3 states: a susceptible node (S), an accepting node (A), and a denying node (D). Instead of SIR-type dynamics, the dynamics of this model are as follows: if an S comes in contact with an A, it will transition to an A with rate 1, and a D with rate $\lambda$. This occurs per edge, so the mean field equations are \begin{equation}
D_k = 1-A_k-S_k
\end{equation}using conservation of probability, and \begin{subequations}\begin{align}
\dot{S}_k &= -(1+\lambda)k\theta S_k, \\
\dot{A}_k &= k\theta S_k.
\end{align}\end{subequations}Here we are using $A_k$ and $D_k$ for the fraction of nodes with $k$ edges in states A and D, respectively. From our above work, it is clear that these equations have an exact solution in terms of an integral.
For simplicity, let us focus on the case of a scale free graph. We find that \begin{equation}
\frac{\dot{S}_k}{\dot{A}_k} = \frac{\mathrm{d}S_k}{\mathrm{d}A_k} = -(1+\lambda),
\end{equation}which implies that \begin{equation}
A_k = \frac{1-S_k}{1+\lambda}.
\end{equation}This implies that, to good approximation, using $z$ as defined above: \begin{equation}
\theta = \frac{1}{1+\lambda} \frac{1}{\langle k\rangle} \sum k\rho_k (1-S_k) = \frac{1-(\gamma-2)z^{\gamma-2}\Gamma(2-\gamma,z)}{1+\lambda} = \frac{z^{\gamma-2}\Gamma(3-\gamma,z)+1-\mathrm{e}^{-z}}{1+\lambda}.
\end{equation}
We immediately see that \begin{equation}
\dot{z} = m\left[z^{\gamma-2}\Gamma(3-\gamma,z)+1-\mathrm{e}^{-z}\right]. \label{dotz2}
\end{equation}
At mean field level, we recognize this as exactly the same as SI epidemic dynamics. This is not an accident, and we will explain why this occurs shortly. Our previous analysis implies that $\tau_{\mathrm{spread}}\sim \log N$ if $\gamma>3$, $\sim \log \log N$ if $\gamma=3$ and $\sim \mathrm{O}(1)$ for $\gamma < 3$. In this case, for large $z$, the dominant term in the dynamics is actually the constant term $1$, so we conclude that $\tau_{\mathrm{end}}\sim \log N$ for this model. Figure \ref{zexp4} compares the simulated dynamics of this model to mean field theory, where we see excellent agreement for $\gamma=3.5$ (for short times, at least) and qualitative agreement for $\gamma=2.5$, but with the simulated $z$ a bit smaller than theoretically predicted. We should finally note that for the same reasons as in the type I rumor spreading model, $S_k$, $A_k$ and $D_k$ may be easily recovered from the mean field solution.
\begin{figure}[htbp]
\centering
\begin{tikzpicture}
\begin{axis}[width=7cm, height=5.5cm, xlabel=$t$, ylabel=$z$, ylabel style=sloped like x axis]
\pgfplotstableread{nu35rec.txt}\datatable
\addplot[color=black, very thick] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{testrec02.txt}\datatable
\addplot[color=red, very thick, dotted] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{testrec1.txt}\datatable
\addplot[color=blue, very thick, dotted] table[x index=0, y index=1] from \datatable;
\pgfplotstableread{testrec5.txt}\datatable
\addplot[color=violet, very thick, dotted] table[x index=0, y index=1] from \datatable;
\draw (axis cs: 0.6, 0) node[right] {$\gamma=3.5$};
\end{axis}
\begin{scope}[xshift=6.2cm]
\begin{axis}[width=7cm, height=5.5cm, xlabel=$t$]
\pgfplotstableread{nu25rec.txt}\datatable
\addplot[color=black, very thick] table[x index=0, y index=1] from \datatable;
\addlegendentry{theoretical};
\pgfplotstableread{testrec022.txt}\datatable
\addplot[color=red, very thick, dotted] table[x index=0, y index=1] from \datatable;
\addlegendentry{$\lambda=0.2$};
\pgfplotstableread{testrec12.txt}\datatable
\addplot[color=blue, very thick, dotted] table[x index=0, y index=1] from \datatable;
\addlegendentry{$\lambda=1$};
\pgfplotstableread{testrec52.txt}\datatable
\addplot[color=violet, very thick, dotted] table[x index=0, y index=1] from \datatable;
\addlegendentry{$\lambda=5$};
\draw (axis cs: 0.6, 0) node[right] {$\gamma=2.5$};
\end{axis}
\end{scope}
\end{tikzpicture}
\caption{Comparison of $z(t)$ between theory (solid line) and simulations (dotted line) for the recommendation spreading model. We used $N=2000$, $m=10$, and 50 trials. The significant deviations from mean field theory for $z$ suddenly increasing are due to finite size. The deviations for $z$ flattening out are due to the breakdown of mean field theory discussed below. We have cut off the trajectories once they begin to show significant deviations.}
\label{zexp4}
\end{figure}
Let us now describe why the dynamics of the recommendation spreading model are, at mean field level, SI epidemic dynamics. The answer can be seen by mapping to a simpler problem, in the following way. Define i.i.d. random variables $X_v$ for each $v\in V$, with $X_v \sim \mathrm{Bernoulli}((1+\lambda)^{-1})$, and remove from the graph $G$ all nodes $v$ with $X_v=0$. The graph we are left with, which we call $G^\prime$, can be used to understand the $t=\infty$ state of a sample path for the recommendation model, in the following way: $G^\prime$ consists of the possible nodes which will become As, if they have the chance to get infected. Now, given a set of nodes which are A at $t=0$, we conclude that a final state for the dynamics of the recommendation spreading model is given by \begin{equation}
v(t=\infty) = \left\lbrace \begin{array}{ll} \mathrm{A} &\ v \text{ not removed, in the same cluster as an initial A} \\ \mathrm{D} &\ v\text{ removed, connected to an A} \\ \mathrm{S} &\ \text{otherwise} \end{array}\right..
\end{equation}Furthermore, this final state has the same probability of occurring as the sum of all possible configurations of the ``removed node'' model which lead to this same final state. Given these states at $t\rightarrow\infty$, we can determine a sample path of the recommendation model by thus treating recommendation spreading as a SI epidemic on $G^\prime$ with spreading rate 1.
This map to the SI epidemic on a reduced graph has a very interesting property, however -- it reveals that the recommendation spreading model actually has an ``epidemic threshold'' in the following sense: suppose that $G^\prime$ is almost surely a collection of clusters of O(1) nodes. Then if, at $t=0$, an O(1) number of the nodes are A, at $t=\infty$ an O(1) number of nodes are A, implying that there is no recommendation ``epidemic.'' A recommendation epidemic can only occur when the cluster size grows with $N$. This epidemic threshold does not occur within the context of mean field theory, and this is ultimately the crucial difference between the recommendation spreading model and the SIR-like models discussed above.
Given this understanding of the late time dynamics, we now return to Figure \ref{zexp4}. In particular (neglecting the constant factor making mean field theory differ from numerics for $\gamma=2.5$), we see that for very small $\lambda$, the only divergence from mean field theory is a finite size effect, because the probability that a giant cluster would not be present is presumably vanishingly small. However, for larger $\lambda$, the probability that disconnected clusters occur becomes larger, and the value of $z$ at which the dynamics stops suggests the frequency with which such clusters occur. For these larger values of $\lambda$, the dynamics of $z$ therefore deviates from mean field theory because the ending state of the dynamics is dependent on the existence and frequency of such clusters, and once the dynamics is dependent on graph structure, mean field theory breaks down.
\section{Conclusion}\label{concsec}
In this paper, we have shown that 4 simple models of irreversible dynamics on networks: the SIR epidemic, the SI STD epidemic, type I rumor spreading, and the new recommendation spreading model, have exact solutions at mean field level, and that these solutions hold up well in the appropriate regimes against numerical tests, differing at most by a constant scaling factor which is not too dramatic.\footnote{Why exactly such scaling factors occur is an open question -- part of the reason may be simplifications in the expression for $\theta(z)$, e.g.} Thus, these results provide a far more thorough justification that mean field theory is a valid approximation scheme for these models than previous works. Interestingly, proper regularization of divergences which can occur on heavy tailed degree distributions, such as those of scale free graphs, proved not only to be necessary mathematically, but to provide important physical insights as well.
Ultimately, the SIR epidemic models, and the type IR or I rumor spreading models, are surely oversimplifications for realistic processes (and it is likely that realistic networks have far more structure than a simple ``mean field" scale free network), so the ultimate relevance of work such as this is to understand qualitatively why network structures can lead to dramatic changes in the behavior of stochastic processes. Towards this end, knowledge of an exact solution can help to solidify intuition that more heuristic approaches give, and can suggest phenomena that heuristic approaches may miss. We have showed that the exact solutions of mean field theory, which is often a valid approximation, provide all of the physical information of interest ($S_k$, $I_k$, and $R_k$) other than information dependent on the graph structure. Finally, we were able to both provide theoretical explanations for many observed phenomena, as well as to postulate some new behaviors and observe them.
Recent work has mathematically proven some significant deviations from mean field behavior -- in particular, an absence of an epidemic threshold on scale free graphs of all degrees \cite{durrett2}. While their results do not become relevant until $N\sim 10^{12}$, they showed that nonetheless mean field theory can sometimes be outright wrong, even on random graphs where physicists are most confident in mean field theory. We hope that the (quite likely rare) existence of models whose mean field theory equations have exact solutions on arbitrary networks will provide key tests of when and where mean field theory is a valid approximation for simplified models of realistic networks and processes. Future work should focus on understanding the extent to which our techniques may be applied to more complicated models, or other classes of models which may admit similar solutions, or focusing more in depth on some of the qualitative arguments we made (e.g., if $\tau_{\mathrm{spread}}\sim $ O(1)) which are not readily observable from our basic simulations.
\section*{Acknowledgements}\addcontentsline{toc}{section}{Acknowledgements}
I would like to thank Daniel Fisher, Greg ver Steeg and Jay Wacker for helpful comments and for encouraging me to continue past my initial calculations.
\bibliographystyle{plain}
\addcontentsline{toc}{section}{References}
|
{
"timestamp": "2012-06-28T02:04:17",
"yymm": "1206",
"arxiv_id": "1206.6294",
"language": "en",
"url": "https://arxiv.org/abs/1206.6294"
}
|
\section{Introduction}
\label{intro}
Experimental and theoretical investigations of plasmon excitations in metallic nano-crystals have grown rapidly, mainly due to possible applications in photo-voltaics and microelectronics. A significant enhancement of absorption of incident light in photodiode-systems with active surfaces covered with nano-size metallic particles (of Au, Ag or Cu) with planar density $10^8$-$10^{10}$/cm$^2$ was observed \cite{wzr3a,wzmocn1,wzr2,konk,wzmocn2,mof}. These findings are of practical importance for the enhancement of solar cell efficiency, especially for the development of thin film cell technology. On the other hand, hybridized states of surface plasmons and photons result in plasmon-polaritons \cite{zastos,maradudin}, which are of high importance for applications in photonics and microelectronics \cite{zastos,deabajo}, in particular, for sub-diffraction transportation of converted light energy and information in metallically modified structures in nano-scale \cite{maradudin,atwater1}.
Surface plasmons in nano-particles have been widely investigated since their classical description by Mie \cite{Mie}. Many particular studies, including numerical modelling of multi-electron clusters, have been carried out \cite{brack,brack1}. They were mostly developments of the Kohn-Sham approach in the form of LDA (Local Density Approximation) or TDLDA (Time Dependent LDA) for small metallic clusters only \cite{brack,brack1,ekardt,ekardt2,kresin}, up to ca.\ 200 electrons (limited for larger clusters by numerical calculation constraints that grow rapidly with the number of electrons). The random phase approximation (RPA) was formulated \cite{rpa} for the description of volume plasmons in bulk metals and utilized also for confined geometry mainly in a numerical or semi-numerical manner \cite{brack,brack1,kresin}. Usually, in these analyses the jellium model was assumed for the description of the positive ion background in the metal and the dynamics was addressed to the electron system only \cite{brack,ekardt,kresin}. Such a model is preferable for clusters of simple metals, including noble metals (also transition and alkali metals).
In the present paper we apply the RPA description using a semiclassical approach for a large metallic nano-sphere (with a radius of several tens of nm, and with $10^5$--$10^7$ electrons), in an all-analytical calculus version \cite{jacak5}. The electron liquid oscillations of compressional and translational type result in excitations inside the sphere and on its surface, respectively. They are referred to as volume and surface plasmons. Damping of plasmons due to electron scattering and due to radiation losses (accounted for via the Lorentz friction force) is included. The shift of the resonance frequency of dipole-type surface plasmons (only such plasmons are induced by a homogeneous time-dependent electric field), due to damping phenomena, fits well with the experimental data for various nano-sphere radii \cite{jacak11}.
Collective dipole-type surface plasmon oscillations in the linear chain of metallic nano-spheres were then analyzed and wave-type plasmon propagation along the chain was described \cite{jacak10,maradudin}. A coupling in the near field regime between oscillating dipoles in neighboring nano-spheres, together with retardation effects for energy irradiation, allowed for appearance of undamped propagation of plasmon waves (called plasmon-polaritons) along the chain in the experimentally realistic region of values of the separation of spheres in the chain and of the nano-sphere radii. This effect is of a particular significance for plasmon arranged non-dissipative and sub-diffraction transport of light converted energy and information along metallic chains for possible applications in nano-electronics.
The undamped mode of plasmon-polaritons occurs, however, on the rim of stability of the linear approach. The zero damping rate separates the region where its value is positive (corresponding to ordinary attenuation of plasmon-polaritons) from the region with a negative damping rate (corresponding to unstable modes). The latter exhibits unphysical behavior, being an artefact of the linear approximation. In order to regularize the description, the nonlinear corrections must thus be included. The nonlinear corrections may be associated with the Lorentz friction forces. The small relativistic contribution to this friction has a nonlinear character and quenches unstable divergent modes. As a result, the instability region of the linear approach is entirely covered by the region of undamped wave propagation with the amplitude accommodated, however, to the nonlinearity scale and independent of the initial condition (regardless of its magnitude). This phenomenon, familiar in other nonlinear systems \cite{mit}, seems to be of a particular significance for understanding of collective plasmon excitations with interesting possible applications.
\section{Damping of plasmons in large nano-spheres}
\label{sec:1}
Within the RPA in semiclassical limit \cite{jacak5},
the solution of the dynamical equation for local density of electrons in a metallic nano-sphere with the radius $a$, can be decomposed into two parts related to the distinct domains:
\begin{equation}
\delta \tilde{\rho}( {\bmb r,t})=\left\{
\begin{array}{l}
\delta \tilde{\rho}_1( {\bmb r,t}), \;\text{for}\; r<a,\\
\delta \tilde{\rho}_2( {\bmb r,t}), \;\text{for}\; r\geq a,\; ( r\rightarrow a+),\\
\end{array}
\right.
\end{equation}
corresponding to the volume and surface excitations, respectively. These two parts of local electron density fluctuations
satisfy the equations \cite{jacak5}:
\begin{equation}
\label{e20}
\frac{\partial^2 \delta \tilde{\rho}_1 ({\bmb r},t) }{\partial t^2}=\frac{2}{3} \frac{\epsilon_F}{m}\nabla^2 \delta \tilde{\rho}_1( {\bmb r},t)-
\omega_p^2 \delta \tilde{\rho}_1( {\bmb r},t),
\end{equation}
and
\begin{equation}
\label{e21}
\begin{array}{l}
\frac{\partial^2 \delta \tilde{\rho}_2 ({\bmb r},t) }{\partial t^2} =-
\frac{2}{3m} \nabla\left\{\left[\frac{3}{5}\epsilon_F n_e+\epsilon_F \delta \tilde{\rho}_2(
{\bmb r},t)\right]\frac{\bmb r}{r}\delta\right\}\\
- \left[\frac{2}{3} \frac{\epsilon_F}{m}\frac{\bmb r}{r}\nabla \delta \tilde{\rho}_2( {\bmb r},t)
+ \frac{\omega_p^2}{4\pi} \frac{\bmb r}{r}\nabla \int d^3r_1 \frac{1}{|{\bmb r}-{\bmb r}_1|}\right.\\
\left.\times \left(\delta \tilde{\rho}_1( {\bmb r}_1 ,t)
\Theta(a-r_1)
+\delta \tilde{\rho}_2( {\bmb r}_1 ,t)\Theta(r_1-a)\right)\right]\delta,\\
\end{array}
\end{equation}
where, $\omega_p^2=\frac{4\pi n_e e^2}{m}$ is the bulk plasmon frequency,
$\Theta$ is the Heaviside step function, $\delta=\delta(r-a)$. The analysis and solutions of the above equations are performed in detail as presented in Ref.
\onlinecite{jacak5}, resulting in determination of plasmon self-mode spectrum, both for volume and surface modes.
Nevertheless, this treatment did not account for plasmon attenuation.
One can, however, include damping of plasmons in a phenomenological manner, adding an attenuation term to the plasmon dynamic equations, i.e., by adding the term $-\frac{2}{\tau_0}\frac{\partial \delta\rho({\bmb r},t)}{\partial t}$ to the r.h.s.
of both Eqs (\ref{e20}) and (\ref{e21}), taking advantage of their oscillatory form \cite{jacak5}. Apart from the homogeneous equations (\ref{e20}) and (\ref{e21}), which determine the self-frequencies of plasmon modes, the dual inhomogeneous equations can be written, with an explicit expression for the forcing factor. This factor would be the time-dependent electric field, including the electrical component of an e-m wave.
For an e-m wave frequency in resonance with plasmons in the metallic nanosphere, the wave-length (being of order of 500 nm) highly exceeds the nanosphere size (with radius $10-50$ nm), thus the dipole regime is in force.
For the homogeneous forcing field ${\bmb E}(t)$ (which corresponds to dipole approximation satisfied for $a\sim 10-50$ nm, when $\lambda \sim 500$ nm), only dipole surface mode can be excited and the electron response
resolves to a single dipole type mode, described by the function $Q_{1m}(t)$.
The function $Q_{1m}(t)$ satisfies the equation:
\begin{equation}
\label{qqq}
\begin{array}{l}
\frac{\partial^2Q_{1m}(t)}{\partial t^2}+\frac{2}{\tau_0}\frac{\partial Q_{1m}(t)}{\partial t}+\omega_1^2 Q_{1m}(t)\\
=\sqrt{\frac{4\pi}{3}}\frac{en_e}{m}\left[E_z(t)\delta_{m0}+\sqrt{2}\left(E_x(t)\delta_{m1}
+ E_y(t)\delta_{m-1}\right)\right],\\
\end{array}
\end{equation}
where $\omega_1=\omega_{01}=\frac{\omega_p}{\sqrt{3\varepsilon}}$ (it is a dipole-type surface plasmon Mie frequency \cite{Mie}).
Only this function contributes to the plasmon response to the homogeneous electric field.
Thus, for the homogeneous forcing field, the electron density fluctuations take the form \cite{jacak5}:
\begin{equation}
\label{oscyl}
\delta \rho({\bmb r},t)=\left\{
\begin{array}{l}
0,\;\; r<a,\\
\sum\limits_{m=-1}^{1}Q_{1m}(t)Y_{1m}(\Omega)\; r\geq a,\; r\rightarrow a+.\\
\end{array} \right.
\end{equation}
For plasmon oscillations given by Eq. (\ref{oscyl}) one can calculate the corresponding dipole,
\begin{equation}
\label{dipolek}
{\bmb D}(t)= e\int d^3r {\bmb r}\delta\rho({\bmb r},t)= \frac{4\pi}{3}e{\bmb q}(t)a^3,
\end{equation}
where,
$Q_{11}(t)=\sqrt{\frac{8\pi}{3}}q_x(t)$, $Q_{1-1}(t)=\sqrt{\frac{8\pi}{3}}q_y(t)$,\\
$Q_{10}(t)=\sqrt{\frac{4\pi}{3}}q_z(t)$
and ${\bmb q}(t)$ satisfies the equation (cf. Eq. (\ref{qqq})),
\begin{equation}
\label{dipoleq}
\left[\frac{\partial^2}{\partial t^2}+ \frac{2}{\tau_0} \frac{\partial}{\partial t} +\omega_1^2\right] {\bmb q}(t)=\frac{en_e}{m}
{\bmb E}(t).
\end{equation}
There are various mechanisms of plasmon damping, which could be effectively accounted for via phenomenological
oscillator type damping term. All types of scattering phenomena, including electron-electron and electron-phonon interactions,
as well as the contribution of the boundary scattering effect \cite{atwater}, cause significant attenuation of plasmons, in particular,
in small metal clusters. All these contributions to the damping rate
scale as $\frac{1}{a}$ and are of decreasing significance as the radius grows. In the following subsection we
argue that damping of plasmons caused by radiation losses scales conversely, as $a^3$, and for large
nano-spheres this channel dominates plasmon attenuation.
\subsection{Lorentz friction for plasmons}
Plasmon oscillations
are themselves a source of the e-m radiation. This radiation takes away the energy of plasmons resulting
in their damping, which can be described as the Lorentz friction force reducing charge oscillations \cite{lan}. This damping was not included in $\tau_0$ in Eq. (\ref{dipoleq}). This $\tau_0$ accounted only for
scattering of electrons on other electrons, on defects, on
phonons and on nanoparticle boundary---all they lead to damping rate expressed by the simplified formula \cite{atwater}:
\begin{equation}
\label{form}
\frac{1}{\tau_0}\simeq \frac{v_F}{2\lambda_b }+\frac{Cv_F}{2a},
\end{equation}
where $C$ is a constant of the order of unity, $a$ is the nano-sphere radius, $v_F$ is the Fermi velocity in the metal,
$\lambda_b$ is the electron free path in bulk (including scattering of electrons on other electrons,
on impurities and on phonons \cite{atwater}); for Ag, $v_F=1.4\times 10^6$ m/s and $\lambda_b\simeq 57$ nm
(at room temperature); the latter term in the formula (\ref{form}) accounts for scattering of electrons on the boundary of
the nanoparticle, while the former one corresponds to scattering processes similar as in bulk. The other effects, as
the so-called Landau damping (especially important in small clusters \cite{jo,ekardt2}), corresponding to the decay of
a plasmon into a high energy particle-hole pair, are of decreasing significance for nano-sphere radii larger than $2-3$ nm
\cite{jo} and are completely negligible for radii larger than 10 nm. Note that the electron liquid spill-out
effect \cite{brack,ekardt} plays a similarly decreasing role with the radius growth, though it was of
primary importance for small clusters \cite{brack,kresin}.
The electron
friction caused by e-m wave emission can be described as the additional electric field \cite{lan},
\begin{equation}
\label{lorentz}
{\bmb E}_L = \frac{2}{3\varepsilon^{3/2}v^3}\frac{\partial^3{\bmb D}(t)}{\partial t^3},
\end{equation}
where $v=\frac{c}{\sqrt{\varepsilon}}$ is the light velocity in the dielectric medium, and ${\bmb D}(t)$ is the dipole of the nano-sphere.
According to Eq. (\ref{dipolek}) we arrive at the following:
\begin{equation}
\label{lor}
{\bmb E}_L= \frac{2e}{3\varepsilon v^2}\frac{4\pi}{3}a^3\frac{\partial^3{\bmb q}(t)}{\partial t^3}.
\end{equation}
Substituting this into Eq. (\ref{dipoleq}), we get,
\begin{equation}
\begin{array}{l}
\left[\frac{\partial^2}{\partial t^2}+ \frac{2}{\tau_0} \frac{\partial}{\partial t} +\omega_1^2\right] {\bmb q}(t)\\
=\frac{en_e}{m}
{\bmb E}(t) +\frac{2}{3\omega_1}\left(\frac{\omega_1a}{v}\right)^3\frac{\partial^3{\bmb q}(t)}{\partial t^3}.
\end{array}
\end{equation}
If one rewrites the above equation (for ${\bmb E}=0$) in the form,
\begin{equation}
\label{appr1}
\left[\frac{\partial^2}{\partial t^2} +\omega_1^2\right] {\bmb q}(t)=
\frac{\partial}{\partial t}\left[ -\frac{2}{\tau_0} {\bmb q}(t) +
\frac{2}{3\omega_1}\left(\frac{\omega_1a}{v}\right)^3\frac{\partial^2{\bmb q}(t)}{\partial t^2}\right],
\end{equation}
thus, one notes that the zeroth order approximation (neglecting attenuation) corresponds to the equation:
\begin{equation}
\label{appr}
\left[\frac{\partial^2}{\partial t^2} +\omega_1^2\right] {\bmb q}(t)= 0.
\end{equation}
In order to solve Eq. (\ref{appr1}) in the next step of perturbation iteration, one can substitute, in the r.h.s. of this equation,
$\frac{\partial^2{\bmb q}(t)}{\partial t^2}$ by $-\omega_1^2 {\bmb q}(t) $ (acc. to Eq. (\ref{appr})).
Therefore, if one assumes the above estimation,
$ \frac{\partial^3{\bmb q}(t)}{\partial t^3}\simeq -\omega_1^2 \frac{\partial{\bmb q}(t)}{\partial t}$,
one can include the Lorentz friction in a renormalized damping term:
\begin{equation}
\label{ratio}
\left[\frac{\partial^2}{\partial t^2}+ \frac{2}{\tau} \frac{\partial}{\partial t} +\omega_1^2\right] {\bmb q}(t)=\frac{en_e}{m}
{\bmb E}(t) ,
\end{equation}
where,
\begin{equation}
\label{tau}
\frac{1}{\tau}=\frac{1}{\tau_0}+\frac{\omega_1}{3}\left(\frac{\omega_1 a}{v}\right)^3\simeq \frac{v_F}{2\lambda_b}+\frac{Cv_F}{2a}
+ \frac{\omega_1}{3}\left(\frac{\omega_1 a}{v}\right)^3,
\end{equation}
and we used $\frac{1}{\tau_0}\simeq \frac{v_F}{2\lambda_b}+\frac{Cv_F}{2a}$
\cite{atwater}.
The renormalized damping causes a change in the shift of self-frequencies of free surface plasmons,
$\omega_1'=\sqrt{\omega_1^2-\frac{1}{\tau^2}}$, which can be compared with the experimental observations for various nanosphere radii \cite{jacak11}.
Note also, that one can verify \cite{jacak11} the above calculated Lorentz friction contribution to plasmon damping by the
estimation of the energy
transfer in the far-field zone (which can be expressed by the Poynting vector) and via comparison with the energy loss of
plasmon oscillations. We have arrived \cite{jacak5,jacak11} at the same formula for damping time rate as given by Eq. (\ref{ratio}).
The radius-dependent shift of the resonance resulting from strong irradiation-induced plasmon damping
was verified experimentally \cite{jacak11} by measurement of light extinction in colloidal solutions of
nanoparticles with different size (it has been done \cite{jacak11} for Au, $10-80$ nm, and Ag, $10-60$ nm). These
measurements clearly support the $a^3$ plasmon damping scaling, as described above for the far-field zone radiation losses in dielectric surroundings.
If, however, another charged system is located in the vicinity of the nano-sphere, the situation changes. For instance, in the case
when the nano-sphere is deposited on the semiconductor surface, the near-field coupling of plasmons with semiconductor
band electrons must be included.
\section{Enhancement of energy transfer from plasmons to electric receiver located in the near-field zone}
Even if the derivation of the plasmon dynamics equation in the form of an effective harmonic oscillator equation is rigorous upon the quantum approach of the quasiclassical RPA method \cite{jacak5}, the inclusion of plasmon attenuation of scattering type and of radiation losses type needs some phenomenological assumptions. They resolve themselves to extension of the quantum RPA harmonic oscillator formulation to the damped oscillator equation form with attenuation described by heuristically assumed damping rates. It has been proved \cite{jacak5,jacak15} that radiation losses, in the case of the free far-field zone radiation (i.e., in the case of vacuum or dielectric surroundings of a metallic nano-sphere with oscillating plasmons), can be accounted for as the Lorentz friction force \cite{lan}, in the manner as described in the previous section. When in the near-field zone (closer than the wave length corresponding to plasmon frequency) the energy receiver (i.e., other system of charges, like a semiconductor with its band system or another metallic nano-sphere as in the chain) is located, the irradiation losses are dominated by energy transfer via this near-field zone coupling channel. The presence of the charged system of the energy receiver in the vicinity of the e-m emitting nanosphere with plasmons modifies the retarded e-m potential of the emitting system and this modifies the Lorentz friction formula, which had been derived, in the standard form, for the dielectric surroundings \cite{lan}. In particular, an enhancement of plasmon radiation losses in the case when the nanoparticles with dipole Mie surface plasmons (excited by incident external light) are deposited on the semiconductor surface, lies behind the observed PV efficiency growth in the new generation of solar cells, metallically modified \cite{wzr3a,wzmocn1,wzr2,konk,wzmocn2,mof}.
In this case, the related attenuation rate can be also estimated by application of the Fermi golden rule to the semiconductor inter-band transitions induced by dipole near-field coupling with plasmons \cite{jacak5,jacak15}. As it was proved \cite{jacak5}, the resulting attenuation rate scales with the nano-sphere radius, $a$, in a different manner in comparison to far-field radiation, and with some correction and renormalization expressed in terms of the band system parameters \cite{jacak5}. One can expect similar behavior in the case of the near-field coupling between nano-spheres in the chain, but for the sake of effectiveness of modeling one can assume that the related attenuation rate has the form as that for the standard Lorentz friction renormalized only by some coefficient phenomenologically assumed in order to account for the modification of the e-m potential by the receiver system presence.
\section{Nonlinear corrections to Lorentz friction force}
Let us consider a metallic nano-sphere located (the center) in $\mathbf{R}_0$. The electric dipole of electrons (fluctuation of electron density beyond the uniform distribution compensated by positive jellium) equals to,
\begin{equation}
\mathbf{D}(\mathbf{R}_0,t)=e\int_V\delta \rho (\mathbf{r},t)\mathbf{r}d^3r.
\end{equation}
This dipole corresponds to surface plasmons of dipole type which oscillates with Mie frequency $\omega_1=\omega_p/\sqrt{3\varepsilon}$ \cite{jacak5}, where $\omega_p$ is bulk plasmon frequency, $\varepsilon$ is the dielectric constant of the surrounding medium. These plasmons are not everlasting excitations and are damped due to scattering phenomena with the damping rate,
$\frac{1}{\tau_0}=\frac{v_F}{2\lambda_b}+\frac{Cv_F}{2a}$ (cf. Eq. (\ref{form})). For large nano-spheres the much more effective mechanism of plasmon damping is, however, given by irradiation energy losses, which for the case of irradiation to the far-field zone can be expressed by the Lorentz friction \cite{jacak11,lan}. Assuming that electrons in the nano-sphere have positions $\mathbf{r}_i$ and assuming static jellium, the dipole of the nano-sphere,
$\mathbf{D}(\mathbf{R}_0,t)=e\sum_{i=1}^{N_e}\mathbf{r}_i=eN_e\mathbf{r}_e(t)$, where
$\mathbf{r}_e =\sum_{i=1}^{N_e}\mathbf{r}_i/N_e$ is the mass center of the electron system. In the case of dynamics, the velocity of the mass center equals to, $\mathbf{v}_e=\sum_{i=1}^{N_e}\mathbf{v}_i/N_e$.
On the charge $eN_e$, located in the mass center $\mathbf{r}_e(t)$ acts a Lorentz friction force \cite{lan},
\begin{equation}
\mathbf{f}_L=\frac{2}{3}(eN_e)^2\left[\frac{d^2\mathbf{u}}{ds^2}-\mathbf{u}
\left(\frac{dU_j}{ds}\right)^2\right],\;\;j=1,...,4,
\end{equation}
where,
$ds=cdt\sqrt{1-v_e^2/c^2}$,
$$
U_j=\left\{ \begin{array}{l}
\mathbf{u}=\mathbf{v}_e/(c\sqrt{1-v_e^2/c^2})\\
u_4=i/\sqrt{1-v_e^2/c^2}\\
\end{array}
\right. ,\;\;
U_j^2=-1.
$$
Up to terms of order $v_e^2/c^2$ with respect to the main term, one can write the electric field equivalent to the Lorentz friction force,
\begin{equation}
\begin{array}{l}
\mathbf{E}_L(t)=\frac{\mathbf{f}_L}{eN_e}
=\frac{2}{3}(eN_e)\frac{1}{c^3}
\left\{\frac{d^2\mathbf{v}_e}{dt^2}+\frac{1}{c^2}\left[\frac{3}{2}\frac{d^2\mathbf{v}_e}{dt^2}
v_e^2\right.\right.\\
\left.\left.+3\frac{d\mathbf{v}_e}{dt}\left(\mathbf{v}_e\cdot \frac{d\mathbf{v}_e}{dt}\right)+\mathbf{v}_e\left(\mathbf{v}_e\cdot \frac{d^2\mathbf{v}_e}{dt^2}\right)\right]\right\}.\\
\end{array}
\end{equation}
Next, using dimensionless variables,
$t'=t\omega_1$, $\mathbf{R}(t')=\frac{\mathbf{r}_e(t)}{a}$, $\dot{\mathbf{R}}(t')=\frac{d\mathbf{r}_e(t)}{a\omega_1dt}=\frac{\mathbf{v}_e}{a\omega_1}$,
$\ddot{\mathbf{R}}(t')=\frac{d^2\mathbf{r}_e(t)}{a\omega_1^2dt^2}=\frac{d\mathbf{v}_e}{a\omega_1^2dt}$,
$\stackrel{...}{\mathbf{R}}(t')=\frac{d^2\mathbf{v}_e(t)}{a\omega_1^3dt^2} $,
(dots indicate derivatives with respect to $t'$), one can write out the dynamical equation in a convenient form.
Taking into account that the dipole corresponding to surface plasmons,
\begin{equation}
\label{eq1}
\bmb{D}=eN_{e}a\bmb{R},
\end{equation}
satisfies equation of oscillatory-type, one can write it in the form (incorporating also the Lorentz friction force),
\begin{equation}
\label{dipol}
\begin{array}{l} \stackrel{..}{\mathbf{R}}+\mathbf{R}+\frac{2}{\tau_0\omega_1}\stackrel{.}{\mathbf{R}}
=\frac{2}{3}\left(\frac{\omega_p a}{\sqrt{3\varepsilon}c}\right)^3\left\{\stackrel{...}{\mathbf{R}}\right.\\
\left.+\left(\frac{\omega_p a}{\sqrt{3\varepsilon}c}\right)^2\left[\frac{3}{2}\stackrel{...}{\mathbf{R}} (\stackrel{.}{\mathbf{R}}\cdot \stackrel{.}{\mathbf{R}})
+3\stackrel{..}{\mathbf{R}}(\stackrel{.}{\mathbf{R}}\cdot \stackrel{..}{\mathbf{R}})+
\stackrel{.}{\mathbf{R}}(\stackrel{.}{\mathbf{R}}\cdot \stackrel{...}{\mathbf{R}})\right]\right\},\\
\end{array}
\end{equation}
the terms on r.h.s. of the above equation describe the Lorentz friction including relativistic nonlinear corrections (in bracket) beyond the ordinary main linear term $\sim \stackrel{...}{\mathbf{R}}$, as given by (\ref{lorentz}).
For the case when $\frac{1}{\tau_0\omega_1}\simeq\left(\frac{\omega_pa}{c\sqrt{3\varepsilon}}\right)^3\ll 1$ (well fulfilled for nano-spheres with radii $10-50$ nm, Au or Ag), one can apply perturbation method of solution, and in zero order perturbation assume $\ddot{\mathbf{R}}+\mathbf{R}=0$. In the next step of perturbation one can thus substitute $\stackrel{..}{\mathbf{R}}=-\stackrel{}{\mathbf{R}}$ and
$\stackrel{...}{\mathbf{R}}=-\stackrel{.}{\mathbf{R}}$ in the r.h.s. of the Eq. (\ref{dipol}).
Let us consider first a single metallic nano-sphere with dipole type surface oscillations with the dipole $\mathbf{D}$.
In the framework of the perturbation method of solution of dynamical equation of oscillatory type for the dipole, Eq. (\ref{dipol}), in the first order of perturbation, attains the following form (including the damping of plasmons due to scattering with the rate $\frac{1}{\tau_0}$ and due to radiation losses accounting for the linear term of Lorentz friction, while the r.h.s. of the equation (\ref{eq4}) expresses nonlinear corrections to Lorentz friction),
\begin{equation}
\label{eq4}
\begin{array}{l}
\ddot{\bmb{R}}+\bmb{R}+\left[\frac{2}{\tau_0 \omega_{1}}+\frac{2}{3}\left(\frac{\omega_p a}{\sqrt{3\varepsilon}c}\right)^{3}\right]\dot{\bmb{R}}\\
=\frac{2}{3}\left(\frac{\omega_p a}{\sqrt{3\varepsilon}c}\right)^{5}\left\{-\frac{5}{2}\dot{\bmb{R}}
\left(\dot{\bmb{R}}\cdotp\dot{\bmb{R}}\right)+3\bmb{R}\left(\dot{\bmb{R}}\cdotp\bmb{R}\right)\right\}.\\
\end{array}
\end{equation}
The above nonlinear equation is complicated in mathematical sense and advanced methods of solutions must be applied, as described in \cite{mit}. According to the special asymptotic methods for solution of nonlinear differential equation (\ref{eq4}), one can find the solution in the following form ($\bmb{R}=R\frac{\bmb{r}}{r}$),
\begin{equation}
\label{eq5}
\begin{array}{l}
R(t)=\frac{A_0 e^{-\frac{t}{\tau}}}{\sqrt{1+\frac{9}{8}\gamma {A_0}^2\left(1-e^{-\frac{2t}{\tau}}\right)}}\cos\left(\omega_1 t+\Theta_0\right),\\
\end{array}
\end{equation}
with
$\frac{1}{\tau \omega_1}=\frac{1}{\tau_0 \omega_1}+\frac{1}{3}\left(\frac{\omega_p a}{\sqrt{3\varepsilon}c}\right)^3\approx\frac{1}{3}\left(\frac{\omega_p a}{\sqrt{3\varepsilon}c}\right)^3$ (what is satisfied for $a$ larger than 10 nm),
$\gamma=\tau \omega_1\frac{1}{3}\left(\frac{\omega_p a}{\sqrt{3\varepsilon}c}\right)^5.$
In the formulae (\ref{eq5}) both coefficients $\frac{1}{\tau \omega_1}$ and $\gamma$ can be renormalized eventually by the above-mentioned phenomenological factor accounting for the change of the e-m potential caused by the presence of energy receivers in the near-field zone of plasmons on the considered metallic nano-sphere (still holding here 1 for simplicity). From the form of equation (\ref{eq5}) it follows that $\frac{1}{\tau \omega_1}$ is always positive. Note that the scattering term, $\frac{1}{\tau_0}=\frac{v_F}{2\lambda_b}+\frac{Cv_F}{2a}$, is negligible (for nano-sphere radius beyond 10 nm) in comparison with the linear contribution of the Lorentz friction, as it is demonstrated in Fig. \ref{fig1}.
The scale of the nonlinear corrections is given by the coefficient $\gamma\approx 10^{-4}\left(\frac{a[nm]}{10}\right)^2$. As this coefficient is small, one can neglect the related contribution in the denominator for the dipole solution (\ref{eq5}), which results in ordinary linear solution of damped oscillations. It means that the nonlinear corrections to the Lorentz friction have no significance in the case of plasmon oscillations of a single nano-sphere. This situation changes, however, considerably in the case of collective plasmon excitation propagating along the metallic nano-chain, as it will be described in the following paragraph.
\begin{figure}
\scalebox{1.0}{\includegraphics{fig1pls.eps}}
\caption{Contribution to the damping rate of surface plasmon oscillations in the nano-sphere versus the nano-sphere radius, including the scattering attenuation (green line) and the linear Lorentz friction damping (blue line); for radii greater than ca.\ 10 nm the second channel dominates in the overall damping (red line).}
\label{fig1}
\end{figure}
\section{Collective plasmon wave-type propagation along the nano-chain in the nonlinear regime}
In the case of the metallic nano-chain one has to take into account the mutual interaction of nano-spheres in the chain. Assuming that in the sphere located in the point $\mathbf{r}$ we deal with the dipole $\mathbf{D}$, then in the other place $\mathbf{r}_0$ ($\mathbf{r}_0$ is fixed to the end of $\mathbf{r}$) the dipole type electric field attains the form as follows (including electro-magnetic retardation),
\begin{equation}
\label{eq6}
\begin{array}{l}
\bmb{E}\left(\bmb{r},\bmb{r}_0,t\right)=\frac{1}{\epsilon {r_0}^3}\left\{3\bmb{n}_0\left(\bmb{n}_0\cdotp\bmb{D}\left(\bmb{r},t-\frac{r_0}{v}\right)\right)\right.\\
\left.-\bmb{D}
\left(\bmb{r},t-\frac{r_0}{v}\right)\right\},\;\;
\bmb{n}_0=\frac{\bmb{r}_0}{r_0}, v=\frac{c}{\sqrt{\epsilon}}.
\end{array}
\end{equation}
This allows for writing out the dynamical equation for plasmon oscillations at each nano-sphere of the chain, which can be numbered by integer $l$ ($d$ will denote the separation between nano-spheres in the chain; vectors $\mathbf{r}$ and $\mathbf{r}_0$ are collinear, if the origin is associated with one of nano-spheres in the chain). The first term of the right-hand-side in the following formula (\ref{dipoll}) describes the dipole type coupling between nano-spheres \cite{jacak10} and the other two terms correspond to contribution due to plasmon attenuation (in the latter term the Lorentz friction caused electric field accounts also for nonlinear corrections). The index $\alpha$ enumerates polarizations, longitudinal and transversal ones with respect to the chain orientation.
\begin{equation}
\label{dipoll}
\begin{array}{l}
\ddot{R}_{\alpha}+R_{\alpha}\left(ld,t\right)
=\sigma_{\alpha}\frac{a^3}{d^3}
\sum\limits_{m=-\infty,m\neq l}^{\infty}\frac{R_{\alpha}\left(md,t-\frac{d|l-m|}{v}\right)}{|l-m|^3}\\
-\frac{2}{\tau_0 \omega_1}\dot{R}_{\alpha}\left(ld,t\right)+\frac{e}{ma{\omega_1}^2}E_{\alpha}\left(ld,t\right),\\
\end{array}
\end{equation}
where, $\sigma_{\alpha}=\left\{\begin{array}{l}-1,\alpha=x,y\\2,\alpha=z\end{array}\right.$ is introduced to distinguish both polarizations.
The summation in the first term of the r.h.s. of the equation (\ref{dipoll}) can be explicitly performed in the manner as presented in \cite{jacak10}, because it
is the same as for the linear theory formulation. Similarly as in the linear theory framework, one can change to the quasi-momentum picture, taking advantage of the chain periodicity (in analogy to Bloch states in crystals with the reciprocal lattice of quasi-momentum), i.e.,
\begin{equation}
\label{eq8}
\begin{array}{l}
R_{\alpha}\left(ld,t\right)=\tilde{R}_{\alpha}\left(k,t\right)e^{-ikld},\\
0\leq k \leq\frac{2\pi}{d},\;\tilde{R}_{\alpha}\left(k,t\right)\simeq\tilde{R}_{\alpha}\left(k\right)\sin\left(t\omega_1+\beta\right).
\end{array}
\end{equation}
Thus the equation (\ref{dipoll}) can be rewritten in the following form (the Lorentz friction term was represented similarly as in equation (\ref{eq4})),
\begin{equation}
\label{eq9}
\begin{array}{l}
\ddot{\tilde{R}}_{\alpha}\left(k,t\right)+{\tilde{\omega}_{\alpha}}^2 \tilde{R}_{\alpha}\left(k,t\right)+\dot{\tilde{R}}_{\alpha}\left(k,t\right)\left\{\frac{1}{\tau_{\alpha} \omega_1}\right.\\
\left. +\frac{1}{3}\left(\frac{\omega_p a}{\sqrt{3\varepsilon}c}\right)^5\left(\frac{5}{2}|
\dot{\tilde{R}}_{\alpha}\left(k,t\right)|^2-3|\tilde{R}_{\alpha}\left(k,t\right)|^2\right)\right\}=0,\\
\end{array}
\end{equation}
where,
${\tilde{\omega}_{\alpha}}^2 = 1 - 2\sigma_{\alpha}\frac{a^3}{d^3}\cos\left(kd\right)\cos\left(\frac{d\omega_1}{v}\right)$ and
\begin{equation}
\label{tlumienie}
\begin{array}{l}
\frac{1}{\tau_{x,y}\omega_1}=\frac{1}{\tau_0\omega_1}+\frac{1}{4}\left(\frac{\omega_1 d}{v}\right)\frac{a^3}{d^3}\left(\left(\frac{\omega_1 d}{v}\right)^2-\left(kd-\pi \right)^2+\frac{\pi^2}{3}\right),\\
\frac{1}{\tau_{z}\omega_1}=\frac{1}{\tau_0\omega_1}+\frac{1}{2}\left(\frac{\omega_1 d}{v}\right)\frac{a^3}{d^3}\left(\left(\frac{\omega_1 d}{v}\right)^2-\left(kd-\pi \right)^2+\frac{\pi^2}{3}\right).\\
\end{array}
\end{equation}
In the above formulae a remarkable property is linked with the expressions for the attenuation rate for both polarizations. The two last expressions below equation (\ref{eq9}) give these damping rates explicitly and one can notice that they could change their signs depending on the values of $d$, $a$ and $k$. In Fig. \ref{fig2} the regions of negative values of the damping rates are marked (for both polarizations).
\begin{figure}
\scalebox{0.42}{\includegraphics{fig2pls.eps}}
\caption{Regions of negative value of damping rates for plasmon-polaritons in the chain (1 for longitudinal polarization modes and 2 for transversal ones), in the nonlinear formulation framework; for the linear theory, red lines give the position of vanishing damping rate for longitudinal modes of plasmon-polaritons and blue lines the same for transversal modes.}
\label{fig2}
\end{figure}
Applying the same methods for solution of the nonlinear equation (\ref{eq9}) as in the former paragraph, using the asymptotic methods \cite{mit}, one can find the corresponding solutions for both regions with positive and negative damping rate, respectively.
For the positive damping rate, $ \frac{1}{\tau_{\alpha}\omega_1}>0$,
\begin{equation}
\label{eq10}
\begin{array}{l}
\tilde{R}_{\alpha}\left(k,t\right)=\frac{A_{\alpha 0}e^{-\frac{t}{\tau_{\alpha}}}}{\sqrt{1+\gamma_{\alpha}A_{\alpha 0}^2\left(1-e^{-\frac{2t}{\tau_{\alpha}}}\right)}}\cos\left(\omega_{\alpha}t+\Theta_0\right),\\
\tilde{R}_{\alpha}\left(k,t\right)\rightarrow_{(t\rightarrow \infty)} 0,\\
\end{array}
\end{equation}
where,
$\gamma_{\alpha}=|\tau_{\alpha}\omega_1|\left(\frac{\omega_1 a}{c}\right)^3\frac{1}{4}\left(\frac{5}{2}\tilde{\omega}_{\alpha}^2-1\right)$. We note from the form of Eq. (\ref{eq10}) that this is a damped mode, vanishing at longer time scale.
Nevertheless, for negative damping rate, $\frac{1}{\tau_{\alpha}\omega_1}<0$,
the solution has a different form,
\begin{equation}
\begin{array}{l}
\tilde{R}_{\alpha}\left(k,t\right)=\frac{A_{\alpha 0}e^{\frac{t}{|\tau_{\alpha}|}}}{\sqrt{1+\gamma_{\alpha}A_{\alpha 0}^2\left(e^{\frac{2t}{|\tau_{\alpha}|}}-1\right)}}\cos\left(\omega_{\alpha}t+\Theta_0\right)\\
\tilde{R}_{\alpha}\left(k,t\right)\rightarrow_{(t \rightarrow \infty)}\frac{1}{\sqrt{\gamma_{\alpha}}}\cos\left(t\omega_{\alpha}+\Theta_0\right).\\
\end{array}
\end{equation}
This solution is stable. It corresponds to an undamped mode which stabilizes on the fixed amplitude at a longer time scale, independent of the initial conditions expressed by $A_{\alpha 0}$.
The corresponding dipole oscillations attain
in the latter case the form of ``planar'' waves propagating along the chain,
\begin{equation}
\label{eq14}
D_{\alpha}=\frac{e N_e a}{\sqrt{\gamma_{\alpha}}}\frac{1}{2}\left\{e^{i\left(\omega_{\alpha}t-kld\right)}+e^{-i\left(\omega_{\alpha}t-kld\right)}\right\}.
\end{equation}
From the above formulae it follows that for a positive attenuation rate we deal with ordinary damped plasmon-polariton propagation, not strongly modified in comparison to the linear theory (due to the small value of the factor $\gamma$). Nevertheless, in the case of a negative damping rate the solution behaves differently---on a longer time scale this solution stabilizes on the constant amplitude independently of initial conditions. This remarkable property characterizes undamped propagation of plasmon-polariton along the chain. If one turns back to the explicit dipole form (\ref{eq1}), then the typical ``planar'' wave formula with constant amplitude describes this undamped mode, as written in the equation (\ref{eq14}). The region of negative damping corresponds thus, within the nonlinear approach, to undamped modes with the fixed amplitude. Let us note that the same region was linked with instability of the linear theory (which was, however, an artefact of the linear approach).
Finally, one can calculate the group velocity of the undamped plasmon-polariton mode, in the following form,
\begin{equation}
\label{eq11}
v_{\alpha}=\frac{d\omega_{\alpha}}{dk}=\omega_1 d\frac{\sigma_{\alpha}a^3\sin\left(kd\right)\cos\left(\frac{d\omega_1}{c}\right)}{d^3\sqrt{1-2\sigma_{\alpha}\frac{a^3}{d^3}\cos\left(kd\right)\cos\left(\frac{d\omega_1}{c}\right)}}.
\end{equation}
From this formula it follows that the group velocity of the undamped wave-type collective plasmon excitation (called plasmon-polariton) may attain different values depending on $a$, $d$ and $k$.
The undamped mode of propagation of collective surface plasmons indicated above seems to match the experimentally observed long-range propagation of plasmon excitations along the metallic nano-chain \cite{maradudin,atwater1,atwater,ggg,plasmons}. The constant and fixed value of the amplitude of these oscillations (\ref{eq10}) is independent of initial conditions, which means that these excitations will be present in the system even if they are excited by arbitrarily small fluctuations. Thus one can conclude that they are self-exciting modes which are always present in the system provided that the radii of the spheres and their separation in the chain have values for which at least one of the attenuation rates (\ref{tlumienie}) is negative.
\section{Conclusions}
We have demonstrated the practical utilization of the RPA semiclassical description of plasmon oscillations in metallic nano-spheres. The oscillatory form of dynamics, both for volume and surface plasmons, rigorously described within the RPA semiclassical limit, fits well with the large nano-sphere case, when the nano-sphere radius is greater than 10 nm and lower than $60$ nm (for Au, Ag or Cu material), which is confirmed by experimental observations. The most important property of plasmons on such large nano-spheres is the very strong e-m irradiation caused by these excitations, which results in quick damping of the oscillations. The attenuation effects for plasmons were not, however, included in the quantum RPA model. Nevertheless, they can be included in a phenomenological manner, taking advantage of the oscillatory form of the dynamical equations. Some information on plasmon damping can be taken from microscopic analyses of small metallic clusters (especially made by LDA and TDLDA methods of numerical simulations employing the Kohn-Sham equation). For larger nano-spheres, these effects, mainly of scattering type (also Landau damping), are, however, not especially important, as they diminish with radius growth, as $\frac{1}{a}$.
The irradiation effects overwhelming the energy losses in the case of large nano-spheres can be grasped in terms of the Lorentz friction, which reduces the charge movement. This approach has been analyzed in the present paper. Two distinct situations were indicated, the first one--of the free radiation to far-field zone in dielectric (or vacuum) surroundings of single nanoparticle and the second one, when in the near-field zone of plasmons on the nano-sphere, an additional charged system is located.
This additional charge system acting as the e-m energy receiver, strongly modifies the e-m potentials of the source and in this way modifies energy emission in comparison to the free emission in vacuum or in dielectric surroundings. In particular, the Lorentz friction is modified in the case of energy receiver presence in the near field zone of plasmonms, in comparison to simple free emission to the far-field zone. The e-m energy receiver located close to emitting nano-sphere, could be semiconductor (as in the case of metallically modified solar cells) or other metallic nano-spheres (as in the case of metallic nano-chain). The latter situation has been analyzed in this paper. We have shown previously \cite{jacak10} that along the infinite nano-chain the collective plasmon-polaritons can propagate (being collective surface plasmons coupled by e-m field in near-field zone), which at certain values of nano-sphere radii and separation in the chain, appear as undamped modes. Simultaneously, the instability regions of linear theory of plasmon-polariton dynamics occur, which shows that the nonlinear corrections must be included.
In this paper we have developed the nonlinear theory of collective plasmon-polariton dynamics along the chain, including nonlinear corrections to the Lorentz friction force. Even though the related nonlinearity is small, it suffices to regularize the unstable linear approach. As the most important observation, we noted the presence of undamped excitations (instead of those unstable within the linear approach), which have fixed amplitude independently of how small or large the initial conditions were. These excitations, typical for nonlinear systems, would have some practical significance, e.g., to enhance sensitivity of antennas with coverings by plasmon nano-systems offering self-induced collective plasmon-polaritons in a wide range of frequencies, which would be excited by even a very small signal (the energy to attain the stable level of plasmon-polariton amplitude would be supplied, in this case, by an external auxiliary supply).
\bibliographystyle{spphys}
|
{
"timestamp": "2012-06-28T02:06:01",
"yymm": "1206",
"arxiv_id": "1206.6371",
"language": "en",
"url": "https://arxiv.org/abs/1206.6371"
}
|
\section{Introduction}\label{Sec:Intro}
Anomalies are a fascinating set of phenomena exhibited by field theories
and string theories. For the sake of clarity let us begin by
distinguishing between three quite different phenomena bearing that name.
The first phenomenon is when a symmetry of a classical action fails to
be a symmetry at the quantum level. One very common example of an
anomaly of this kind is the breakdown of classical scale invariance
of a system when we consider the full quantum theory. This breakdown results
in \textit{renormalization group flow}, i.e., a scale-dependence
of physical quantities even in a classically scale-invariant
theory. Often this classical symmetry cannot be restored without
seriously modifying the content of the theory. Anomalies of this
kind often serve as a cautionary tale to remind us that
the symmetries of a classical action, like scale invariance,
will often not survive quantisation.
The second set of phenomena are what are termed as gauge anomalies.
A system is said to exhibit a gauge anomaly if a particular
classical gauge redundancy of the system is no longer a
redundancy at the quantum level. Since such redundancies are often
crucial in eliminating unphysical states in a theory, a gauge
anomaly often signifies a serious mathematical inconsistency
in the theory. Hence this second kind of anomaly serves as
a consistency criterion whereby we discard any theory exhibiting
a gauge anomaly as most probably inconsistent.
The third set of phenomena which we would be mainly interested
in this work is when a genuine symmetry of a quantum theory
is no more a symmetry when the theory is placed in a non-trivial
background where we turn on sources for various operators in the theory.
This lack of symmetry is reflected in the fact that the path integral
with these sources turned on is no more invariant under the original
symmetry transformations. If the sources are non-trivial
gauge/gravitational backgrounds (corresponding to the charge/energy-momentum
operators in the theory) the path integral is no more gauge-invariant.
In fact as is well known the gauge transformation of the path-integral
is highly constrained and the possible transformations
are classified by the Wess-Zumino descent relations\footnote{The Wess-Zumino
descent relations are dealt with in detail in various textbooks\cite{Weinberg:1996kr,
Bertlmann:1996xk,Bastianelli:2006rx} and lecture notes \cite{Harvey:2005it,Bilal:2008qx}.}.
Note that unlike the previous two phenomena here we make no
reference to any specific classical description or the process of
quantisation and hence this kind of anomalies are well-defined
even in theories with multiple classical descriptions (or theories
with no known classical description). Unlike the first kind of
anomalies the symmetry is simply recovered at the quantum level
by turning off the sources. Unlike the gauge anomalies the third
kind of anomalies do not lead to any inconsistency. In what
follows when we speak of anomaly we will always have in mind this
last kind of anomalies unless specified otherwise.
Anomalies have been studied in detail in the last few decades
and their mathematical structure and phenomenological consequence
for zero temperature/chemical potential situations are reasonably
well-understood. However the anomaly related phenomena
in finite temperature setups let alone in non-equilibrium states
are still relatively poorly understood despite their obvious relevance
to fields ranging from solid state physics to cosmology. It is becoming
increasingly evident that there are universal transport processes
which are linked to anomalies present in a system and that study
of anomalies provide a non-perturbative way of classifying these
transport processes say in solid-state physics\cite{2012PhRvB..85d5104R}.
While the presence of transport processes linked to anomalies had been
noticed before in a diversity of systems ranging from free fermions\footnote{It would
be an impossible task to list all the references in
the last few decades which have discovered (and rediscovered)
such effects in free/weakly coupled theories in
various disguises using a diversity of methods. See for example
\cite{Vilenkin:1978hb} for what is probably the earliest study in $3+1d$. See
\cite{Loganayagam:2012pz} for a recent generalisation to arbitrary dimensions.}
to holographic fluids\footnote{See for example \cite{Erdmenger:2008rm,Banerjee:2008th,
Torabian:2009qk} for some of the initial holographic results.}
a main advance was made in \cite{Son:2009tf}. In that work it was
shown using very general entropy arguments that the $U(1)^3$ anomaly
coefficient in an arbitrary $3+1d$ relativistic field theory is
linked to a specific transport process in the corresponding hydrodynamics.
This argument has since then been generalised to finite temperature
corrections \cite{Neiman:2010zi, Loganayagam:2011mu} and $U(1)^{n+1}$ anomalies in
$d=2n$ space time dimensions \cite{Kharzeev:2011ds, Loganayagam:2011mu}.
In particular the author of \cite{Loganayagam:2011mu} identified a
rich structure to the anomaly-induced transport processes
by writing down an underlying Gibbs-current which captured
these processes in a succinct way. Later in a microscopic
context in ideal Weyl gases, the authors of \cite{Loganayagam:2012pz}
identified this structure as emerging from an adiabatic
flow of chiral states convected in a specific way in
a given fluid flow.
While these entropy arguments are reasonably straightforward they
appear somewhat non-intuitive from a microscopic field theory viewpoint.
It is especially important to have a more microscopic understanding of these
transport processes if one wants to extend the study of anomalies
far away from equilibrium where one cannot resort to such
thermodynamic arguments. So it is crucial to first rephrase
these arguments in a more field theory friendly terms so
that one may have a better insight
on how to move far away from equilibrium.
Precisely such a field-theory friendly reformulation in $3+1d$ and
$1+1d$ was found recently in the references \cite{Banerjee:2012iz} and
\cite{Jain:2012rh} respectively. Our main aim in this paper is to
generalise their results to arbitrary even space time dimensions. So
let us begin by repeating the basic physical idea behind this
reformulation in the next few paragraphs.
Given a particular field theory exhibiting certain anomalies, one begins
by placing that field theory in a time-independent gauge/gravitational
background at finite temperature/chemical potential. We take
the gauge/gravitational background to be spatially slowly
varying compared to all other scales in the theory. Using this
one can imagine integrating out all the heavy modes\footnote{Time-independence
at finite temperature and chemical potential essentially means we are
doing a Euclidean field theory. Unlike the Lorentzian field theory
(which often has light-hydrodynamic modes) the Euclidean field theory
has very few light modes except probably the Goldstone modes arising out
of spontaneous symmetry breaking. We thank Shiraz Minwalla for emphasising
this point.} in the theory to generate an effective Euler-Heisenberg type effective action
for the gauge/gravitational background fields at
finite temperature/chemical potential.
In the next step one expands this effective action in a spatial derivative
expansion and then imposes the constraint that its gauge transformation
be that fixed by the anomaly. This constrains the terms that can appear
in the derivative expansion of the Euler-Heisenberg type effective action.
As is clear from the discussion above, this effective action and
the corresponding partition function have a clear microscopic
interpretation in terms of a field-theory path integral and hence
is an appropriate object in terms of which one might try to
reformulate the anomalous transport coefficients.
The third step is to link various terms that appear in
the partition function to the transport coefficients in the
hydrodynamic equations. The crucial idea in this link is the
realisation that the path integral we described above is essentially
dominated by a time-independent hydrodynamic state (or more
precisely a hydrostatic state). This means in particular that the
expectation value of energy/momentum/charge/entropy calculated via
the partition function should match with the distribution
of these quantities in the corresponding hydrostatic state.
These distributions in turn depend on a subset of transport
coefficients in the hydrodynamic constitutive relations which
determine the hydrostatic state. In this way various terms that
appear in the equilibrium partition function are linked to/constrain the
transport coefficients crucial to hydrostatics. Focusing on just
the terms in the path-integral which leads to the failure
of gauge invariance we can then identify the universal
transport coefficients which are linked to the anomalies.
This gives a re-derivation of various entropy argument results
in a path-integral language thus opening the possibility that
an argument in a similar spirit with Schwinger-Keldysh path integral
will give us insight into non-equilibrium anomaly-induced phenomena.
Our main aim in this paper is twofold - first is to carry through in
arbitrary dimensions this program of equilibrium partition function
thus generalising the results of \cite{Banerjee:2012iz,Jain:2012rh} and
re-deriving in a path-integral friendly language
the results of \cite{Kharzeev:2011ds, Loganayagam:2011mu}.
Our second aim is to clarify the relation between the Gibbs current
studied in \cite{Loganayagam:2011mu,Loganayagam:2012pz} and the
partition function of \cite{Banerjee:2012iz,Jain:2012rh}. Relating
them requires some care in distinguishing the consistent
from the covariant charge; the final result, however, is intuitive: the
negative logarithm of the equilibrium partition function
(times temperature) is simply obtained by integrating the
equilibrium Gibbs free energy density (viz.\ the zeroth
component of the Gibbs free current) over a spatial hypersurface.
This provides a direct and an intuitive link between the local
description in terms of a Gibbs current vs. the global description
in terms of the partition function.
The plan of the paper is as follows. We will begin by mainly reviewing known
results in Section \S\ref{sec:prelim}. First we review the formalism/results of
\cite{Loganayagam:2011mu} in subsection\S\S\ref{subsec:LogaReview} where
entropy arguments were used to constrain the anomaly-induced transport processes
a Gibbs-current was written down which captured those processes in a succinct way.
This is followed by subsection\S\S\ref{subsec:PartitionReview} where we
briefly review the relevant details of the equilibrium partition function formalism
for fluids as developed in \cite{Banerjee:2012iz}. A recap of the relevant results in
(3+1) and (1+1) dimensions\cite{Banerjee:2012iz,Jain:2012rh} and a
comparison with results in this paper are relegated to appendix~\ref{app:oldresult}.
Section \S\ref{sec:2ndimu1} is devoted to the derivation of transport coefficients
for $2n$ dimensional anomalous fluid using the partition function method. The
next section\S\ref{sec:entropy} contains construction of entropy current for the fluid
and the constraints on it coming from partition function. This mirrors similar
discussions in \cite{Banerjee:2012iz,Jain:2012rh}. We then compare these results
to the results of \cite{Loganayagam:2011mu} presented before in
subsection\S\S\ref{subsec:LogaReview} and find a perfect agreement.
Prodded by this agreement, we proceed in next section\S\ref{sec:IntByParts} to a
deeper analysis of the relation between the two formalisms. We prove an
intuitive relation whereby the partition function could be directly derived
from the Gibbs current of \cite{Loganayagam:2011mu} by a simple integration
(after one carefully shifts from the covariant to the consistent charge).
This is followed by section\S\ref{sec:2ndimmul} where we generalise all
our results for multiple $U(1)$ charges. We perform a $CPT$ invariance
analysis of the fluid in section \S\ref{sec:CPT} and this imposes
constraints on the fluid partition function. We end with conclusion
and discussions in section\S\ref{sec:conclusion}.
Various technical details have been pushed to the appendices for the
convenience of the reader. After the appendix~\ref{app:oldresult}
on comparison with previous partition function results in (3+1) and
(1+1) dimensions, we have placed an appendix~\ref{app:hydrostatics} detailing
various specifics about the hydrostatic configuration considered in
\cite{Banerjee:2012iz}. We then have an appendix~\ref{app:variationForms}
where we present the variational formulae to obtain currents from
the partition function in the language of differential forms.
This is followed by an appendix~\ref{app:formConventions}
on notations and conventions (especially the conventions of wedge product etc.).
\section{Preliminaries}\label{sec:prelim}
In this section we begin by reviewing and generalising various results from \cite{Loganayagam:2011mu} where
constraints on anomaly-induced transport in arbitrary dimensions were derived using
adiabaticity (i.e., the statement that there is no entropy production associated with
these transport processes). Many of the zero temperature results here were also
independently derived by the authors of \cite{Kharzeev:2011ds}.
We will then review the construction of equilibrium partition function
(free energy) for fluid in the rest of the section. The technique
has been well explained in \cite{Banerjee:2012iz} and
familiar readers can skip this part.
\subsection{Adiabaticity and Anomaly induced transport}\label{subsec:LogaReview}
Hydrodynamics is a low energy (or long wavelength) description of a
quantum field theory around its thermodynamic equilibrium. Since the fluctuations are
of low energy, we can express physical data in terms of derivative expansions of
fluid variables (fluid velocity $u(x)$, temperature $T(x)$ and chemical potential
$\mu(x)$) around their equilibrium value.
The dynamics of the fluid is described by some conservation equations.
For example, the conservation equations of the fluid stress-tensor or the fluid charge
current. These are known as constitutive equations.
The stress tensor and charged current of fluid can be expressed in terms of fluid
variables and their derivatives. At any derivative order, a generic form of
the stress tensor and charged current can be written
demanding symmetry and thermodynamics of the underlying field theory. These generic expressions
are known as constitutive relations. As it turns out, the validity of the second law of thermodynamics
further constrains the form of these constitutive relations.
The author of \cite{Loganayagam:2011mu} assumed the following form for the
constitutive relations describing energy, charge and
entropy transport in a fluid
\begin{equation}
\begin{split}
T^{\mu\nu} &\equiv \varepsilon u^\mu u^\nu + p P^{\mu\nu} + q^\mu_{anom}u^\nu + u^\mu q^\nu_{anom} + T^{\mu\nu}_{diss}\\
J^{\mu} &\equiv q u^\mu + J^{\mu}_{anom}+J^{\mu}_{diss} \\
J^\mu_S &\equiv s u^\mu + J^\mu_{S,anom}+J^\mu_{S,diss}\\
\end{split}
\end{equation}
where $u^\mu$ is the velocity of the fluid under consideration which obeys $u^\mu u_\mu =-1$ when
contracted using the space time metric $g_{\mu\nu}$. Further, $P^{\mu\nu}\equiv g^{\mu\nu}+u^\mu u^\nu$ ,
pressure of the fluid is $p$ and $\{\epsilon,q,s\}$ are the
energy,charge and the entropy densities respectively. We have denoted by $\{q^\mu_{anom},J^{\mu}_{anom},
J^\mu_{S,anom}\}$ the anomalous heat/charge/entropy currents and by $\{T^{\mu\nu}_{diss},J^{\mu}_{diss},
J^\mu_{S,diss}\}$ the dissipative currents.
\subsubsection{Equation for adiabaticity}
A convenient way to describe adiabatic transport process is via a
\textbf{covariant} anomalous Gibbs current $\prn{\mathcal{G}^{Cov}_{anom}}^\mu$.
The adjective \textbf{covariant} refers to the fact that the Gibbs free energy
and the corresponding partition function are computed by turning on chemical
potential for the \textbf{covariant} charge. This is to be contrasted with
the \textbf{consistent} partition function and
the corresponding \textbf{consistent} anomalous Gibbs current
$\prn{\mathcal{G}^{Consistent}_{anom}}^\mu$.
Since this distinction is crucial let us elaborate this in the
next few paragraphs - it is a fundamental result due to Noether that the continuous
symmetries of a theory are closely linked to the conserved
currents in that theory. Hence when the path integral fails to have
a symmetry in the presence of background sources, there are
two main consequences - first of all it directly
leads to a modification of the corresponding charge conservation and
a failure of Noether theorem. The second consequence is that various
correlators obtained by varying the path integral are not gauge-covariant
and a more general modifications of Ward identities occur.
A simple example is the expectation value of the current obtained by
varying the path integral with respect to a gauge field (often termed
the \textbf{consistent} current ) as,
\[ J^{\mu}_{Consistent}\equiv \frac{\partial S}{\partial {\cal A}_{\mu}} .\]
The consistent current is not covariant under gauge transformation.
As has been explained in great detail in \cite{Bardeen:1984pm} thus
there exists another current in anomalous theories: the covariant current.
The covariant current $J^{\mu}_{Cov}$ is a current shifted with respect
to the consistent current by an amount $J_c^{\mu}$. The shift is such
that its gauge transformation is anomalous and it exactly cancels the
gauge non invariant part of the consistent current. Thus, the covariant
current is covariant under the gauge transformation, as suggested by its name.
The covariant Gibbs current describes the transport of Gibbs free energy when
a chemical potential is turned on for the covariant charge.
We will take a Hodge-dual of this covariant Gibbs current to get a $d-1$ form
in d-space time dimensions. Let us denote this Hodge-dual by $\bar{\mathcal{G}}^{Cov}_{anom}$.
The anomalous parts of charge/entropy/energy currents can be derived from this
Gibbs current via thermodynamics
\begin{equation}
\begin{split}
\bar{J}^{Cov}_{anom} &= -\frac{\partial\bar{\mathcal{G}}_{anom}}{\partial\mu}\\
\bar{J}^{Cov}_{S,anom} &= -\frac{\partial\bar{\mathcal{G}}_{anom}}{\partial T}\\
\bar{q}^{Cov}_{anom} &= \bar{\mathcal{G}}_{anom} + T \bar{J}_{S,anom} + \mu \bar{J}_{anom}
\end{split}
\end{equation}
Then according to \cite{Loganayagam:2011mu} the condition for adiabaticity is
\begin{equation}\label{adiabiticity}
d\bar{q}^{Cov}_{anom} + \mathfrak{a} \wedge \bar{q}^{Cov}_{anom} -\mathcal{E}\wedge \bar{J}^{Cov}_{anom}
= T d\bar{J}^{Cov}_{S,anom} + \mu d\bar{J}^{Cov}_{anom} -\mu \bar{\mathfrak{A}}^{Cov}
\end{equation}
where $\mathfrak{a},\mathcal{E}$ are the acceleration 1-form and the rest-frame
electric field 1-form respectively
defined via
\[ \mathfrak{a} \equiv (u.\nabla)u_\mu\ dx^\mu\ ,\quad \mathcal{E}\equiv u^\nu\mathcal{F}_{\mu\nu} dx^\mu \]
Further the rest frame magnetic field/vorticity 2-forms are defined by subtracting out
the electric part from the gauge field strength
and the acceleration part from the exterior derivative of velocity, viz.,
\[ \mathcal{B}\equiv \mathcal{F}-u\wedge\mathcal{E} \ ,\quad 2\omega \equiv du+u\wedge \mathfrak{a} \]
The symbol $\bar{\mathfrak{A}}^{Cov}$ is the d-form which is the
Hodge dual of the rate at which the \textbf{covariant}
charge is created due to the anomaly, i.e.,
\[ d\bar{J}^{Cov} = \bar{\mathfrak{A}}^{Cov} \]
where $\bar{J}^{Cov}$ is the entire covariant charge current including both the anomalous
and the non-anomalous pieces. For simplicity we have restricted our attention to a single U(1)
global symmetry which becomes anomalous on a non-trivial background.
In terms of the Gibbs current, we can write the adiabaticity condition \eqref{adiabiticity} as,
\begin{equation}\label{eq:adiabG}
d\bar{\mathcal{G}}^{Cov}_{anom} + \mathfrak{a} \wedge \bar{\mathcal{G}}^{Cov}_{anom}+\mu \bar{\mathfrak{A}}^{Cov}
= \prn{dT+\mathfrak{a}T}\wedge \frac{\partial\bar{\mathcal{G}}^{Cov}_{anom}}{\partial T}
+ \prn{d\mu+\mathfrak{a}\mu-\mathcal{E}}\wedge \frac{\partial\bar{\mathcal{G}}^{Cov}_{anom}}{\partial \mu}
\end{equation}
\subsubsection{Construction of the polynomial $\mathfrak{F}^\omega_{anom}$}
The main insight of \cite{Loganayagam:2011mu} is that in d-space time
dimensions the solutions of this equation are most conveniently phrased
in terms of a single homogeneous polynomial of degree $n+1$
in temperature $T$ and chemical potential $\mu$.
Following the notation employed in \cite{Loganayagam:2012pz}
we will denote this polynomial as $\mathfrak{F}^\omega_{anom}[T,\mu]$. As was realised
in \cite{Loganayagam:2012pz}, this polynomial is often closely related to the anomaly polynomial
of the system\footnote{We remind the reader that the anomalies of a theory living in $d=2n$ spacetime
dimensions is succinctly captured by a $2n+2$ form living in \emph{two dimensions higher}. This $2n+2$ form called the
anomaly polynomial (since it is a polynomial in external/background field strengths $\mathcal{F}$ and $\mathfrak{R}$)
is related to the variation of the effective action $\delta W$ via \emph{the descent relations}
\[ \mathcal{P}_{anom}=d\Gamma_{CS}\ ,\qquad \delta \Gamma_{CS}= d \delta W \]
We will refer the reader to various textbooks\cite{Weinberg:1996kr,
Bertlmann:1996xk,Bastianelli:2006rx} and lecture notes \cite{Harvey:2005it,Bilal:2008qx} for a more detailed
exposition.} . More precisely, for a variety of systems we have a remarkable relation
between $\mathfrak{F}_{anom}^\omega[T,\mu]$ and the anomaly polynomial $\mathcal{P}_{anom} \brk{ \mathcal{F}, \mathfrak{R}}$
\begin{equation}\label{eq:anomFP}
\begin{split}
\mathfrak{F}_{anom}^\omega[T,\mu] = \mathcal{P}_{anom} \brk{ \mathcal{F} \mapsto \mu, p_1(\mathfrak{R}) \mapsto - T^2 , p_{k>1}(\mathfrak{R}) \mapsto 0 }
\end{split}
\end{equation}
Let us be more specific : on a $(2n-1)+1$ dimensional space time consider a theory with
\begin{equation}\label{eq:FOmegaC}
\begin{split}
\mathfrak{F}^\omega_{anom}[T,\mu] &= \mathcal{C}_{anom}\mu^{n+1}+\sum_{m=0}^{n}C_m T^{m+1}\mu^{n-m}\\
\end{split}
\end{equation}
Assuming that the theory obeys the
replacement rule \eqref{eq:anomFP} such a $\mathfrak{F}^\omega_{anom}[T,\mu]$ can be obtained
from an anomaly polynomial\footnote{Since all
relativistic theories only have integer powers of Pontryagin forms the constants $C_{m}$ should
vanish whenever $m$ is even. As we shall see later that another way to arrive at the same conclusion
is to impose CPT invariance.}
\begin{equation}
\begin{split}
\mathcal{P}_{anom} &= \mathcal{C}_{anom}\mathcal{F}^{n+1}+\sum_{m=0}^{n}C_m \brk{- p_1(\mathfrak{R})}^{\frac{m+1}{2}}\mathcal{F}^{n-m}+\ldots\\
\end{split}
\end{equation}
where we have presented the terms which do not involve the higher Pontryagin forms.
Restricting our attention only to the $U(1)^{n+1}$ anomaly (and ignoring the mixed/pure
gravitational anomalies ) we can write
\begin{equation}\label{eq:dJ}
\begin{split}
d\bar{J}_{Consistent} &=\mathcal{C}_{anom}\mathcal{F}^n\\
d\bar{J}_{Cov} &=(n+1)\mathcal{C}_{anom} \mathcal{F}^n \\
\end{split}
\end{equation}
and their difference is given by
\begin{equation}\label{eq:shift}
\begin{split}
\bar{J}_{Cov} = \bar{J}_{Consistent}+n \mathcal{C}_{anom}\hat{\mathcal{A}}\wedge \mathcal{F}^{n-1}
\end{split}
\end{equation}
The solution of \eqref{eq:adiabG} corresponding to the homogeneous polynomial \eqref{eq:FOmegaC}
is given by
\begin{equation}\label{eq:GCovBOmega}
\begin{split}
\bar{\mathcal{G}}^{Cov}_{anom}
&= C_0 T \hat{\mathcal{A}}\wedge\mathcal{F}^{n-1}+ \sum_{m=1}^{n}\left[\mathcal{C}_{anom}\binom{n+1}{m+1}\mu^{m+1}\right.\\
&\qquad \left. + \sum_{k=0}^{m}C_k \binom{n-k}{m-k} T^{k+1}\mu^{m-k}\right] (2\omega)^{m-1} \mathcal{B}^{n-m}\wedge u \\
\end{split}
\end{equation}
Here $\hat{\mathcal{A}}$ is the $U(1)$ gauge-potential 1-form in some gauge with
$\mathcal{F}\equiv d\hat{\mathcal{A}}$ being its field-strength 2-form. Further,
$\mathcal{B},\omega$ are the rest frame magnetic field/vorticity 2-forms
and $T ,\mu $ are the local temperature and chemical potential respectively.
They obey
\begin{equation}
\begin{split}
(d\mathcal{B})\wedge u = -(2\omega)\wedge\mathcal{E}\wedge u \ , \quad d(2\omega)\wedge u = (2\omega)\wedge\mathfrak{a}\wedge u
\end{split}
\end{equation}
Using these equations it is a straightforward exercise to check that
\eqref{eq:GCovBOmega} furnishes a solution to \eqref{eq:adiabG}.
We will make a few remarks before we proceed to derive charge/entropy/energy currents
from this Gibbs current. Note that if one insists that the Gibbs current be gauge-invariant
then we are forced to put $C_0=0$ - in the solution presented in
\cite{Loganayagam:2011mu} this condition was implicitly assumed
and the $C_0$ term was absent. The authors of \cite{Banerjee:2012iz} later
relaxed this assumption insisting gauge-invariance only for
the covariant charge/energy currents. Since we would be interested in
comparison with the results derived in \cite{Banerjee:2012iz} it
is useful to retain the $C_0$ term.
Now we use thermodynamics to obtain the charge current as
\begin{equation}
\begin{split}
&\bar{J}^{Cov}_{anom} \\
&=- \sum_{m=1}^{n} \left[ (m+1)\mathcal{C}_{anom}\binom{n+1}{m+1} \mu^m
\right.\\
&\qquad\left. +\sum_{k=0}^{m}(m-k)C_k \binom{n-k}{m-k} T^{k+1}\mu^{m-k-1}\right] (2\omega)^{m-1} \mathcal{B}^{n-m}\wedge u \\
\end{split}
\end{equation}
and the entropy current is given by
\begin{equation}
\begin{split}
\bar{J}^{Cov}_{S,anom}
&=- C_0\hat{\mathcal{A}}\wedge\mathcal{F}^{n-1}\\
&\qquad - \sum_{m=1}^{n}\sum_{k=0}^{m}(k+1) C_k \binom{n-k}{m-k} T^{k}\mu^{m-k} (2\omega)^{m-1} \mathcal{B}^{n-m}\wedge u\\
\end{split}
\end{equation}
The energy current is given by
\begin{equation}
\begin{split}
&\bar{q}^{Cov}_{anom}\\
&=- \sum_{m=1}^{n}m\left[\mathcal{C}_{anom}\binom{n+1}{m+1} \mu^{m+1}
\right.\\
&\qquad \left.+\sum_{k=1}^{m}C_k \binom{n-k}{m-k} T^{k+1}\mu^{m-k}\right] (2\omega)^{m-1} \mathcal{B}^{n-m}\wedge u \\
\end{split}
\end{equation}
These currents satisfy an interesting Reciprocity type relationship
noticed in \cite{Loganayagam:2011mu}
\begin{equation}\label{eq:reciprocity}
\frac{\delta \bar{q}^{Cov}_{anom}}{\delta \mathcal{B}} = \frac{\delta \bar{J}^{Cov}_{anom}}{\delta (2\omega)}
\end{equation}
While this is a solution in a generic frame one can specialise to the Landau frame (where the
velocity is defined via the energy current) by a frame transformation
\begin{equation}
\begin{split}
u^\mu &\mapsto u^\mu - \frac{q^\mu_{anom}}{\epsilon + p}, \\%&\qquad J^\mu_{anom} \mapsto J^\mu_{anom} - q \frac{q^\mu_{anom}}{\epsilon + p}\\
J^\mu_{anom} &\mapsto J^\mu_{anom} - q \frac{q^\mu_{anom}}{\epsilon + p} ,\\
J^\mu_{S,anom}& \mapsto J^\mu_{S,anom} - s \frac{q^\mu_{anom}}{\epsilon + p},\\%&\qquad q^\mu_{anom} \mapsto 0\\
q^\mu_{anom} &\mapsto 0\\
\end{split}
\end{equation}
to get
\begin{equation}
\begin{split}
\bar{J}^{Cov,Landau}_{anom} &= \sum_{m=1}^{n}\xi_m(2\omega)^{m-1} \mathcal{B}^{n-m}\wedge u\\
\bar{J}^{Cov,Landau}_{S,anom} &= \sum_{m=1}^{n}\xi^{(s)}_m(2\omega)^{m-1} \mathcal{B}^{n-m}\wedge u+\zeta\ \hat{\mathcal{A}}\wedge\mathcal{F}^{n-1}\\
\end{split}
\end{equation}
where
\begin{equation}\label{eq:xiLoga}
\begin{split}
\xi_m &\equiv \brk{m \frac{q\mu}{\epsilon + p}-(m+1)}\mathcal{C}_{anom}\binom{n+1}{m+1} \mu^m\\
&\qquad+\sum_{k=0}^{m}\brk{m \frac{q\mu}{\epsilon + p}-(m-k)}C_k \binom{n-k}{m-k} T^{k+1}\mu^{m-k-1} \\
\xi^{(s)}_m &\equiv \brk{m \frac{sT}{\epsilon + p}}\mathcal{C}_{anom}\binom{n+1}{m+1} T^{-1}\mu^{m+1}\\
&\qquad+\sum_{k=0}^{m}\brk{m \frac{sT}{\epsilon + p}-(k+1)}C_k \binom{n-k}{m-k} T^k\mu^{m-k} \\
\zeta &= - C_0
\end{split}
\end{equation}
Often in the literature the entropy current is quoted in the form
\begin{equation}
\begin{split}
\bar{J}^{Cov,Landau}_{S,anom} &= -\frac{\mu}{T} \bar{J}^{Cov,Landau}_{anom} + \sum_{m=1}^{n}\chi_m(2\omega)^{m-1} \mathcal{B}^{n-m}\wedge u+\zeta\ \hat{\mathcal{A}}\wedge\mathcal{F}^{n-1}\\
\end{split}
\end{equation}
where
\begin{equation}\label{eq:Chi_mPrediction}
\begin{split}
\zeta &= -C_0 \\
\chi_m &\equiv \xi^{(s)}_m +\frac{\mu}{T} \xi_m\\
&= - \mathcal{C}_{anom}\binom{n+1}{m+1} T^{-1}\mu^{m+1}-\sum_{k=0}^{m}C_k \binom{n-k}{m-k} T^k\mu^{m-k} \\
\end{split}
\end{equation}
where we have used the thermodynamic relation $sT+q\mu=\epsilon + p$. By looking at \eqref{eq:GCovBOmega}
we recognise these to be the coefficients occurring in the anomalous Gibbs current :
\begin{equation}\label{eq:GibbsChi}
\begin{split}
\bar{\mathcal{G}}^{Cov}_{anom} &= -T\brk{\sum_{m=1}^{n}\chi_m(2\omega)^{m-1} \mathcal{B}^{n-m}\wedge u+\zeta\ \hat{\mathcal{A}}\wedge\mathcal{F}^{n-1}}\\
\end{split}
\end{equation}
In fact this is to be expected from basic thermodynamic considerations : the above equation is
a direct consequence of the relation $G=-T(S+\frac{\mu}{T}Q-\frac{U}{T})$ and the fact that
energy current receives no anomalous contributions in the Landau frame.
This ends our review of the main results of \cite{Loganayagam:2011mu} adapted to
our purposes. Our aim in the rest of the paper would be to derive all these results
purely from a partition function analysis.
\subsection{Equilibrium Partition Function}\label{subsec:PartitionReview}
In this subsection we review (and extend) an alternative approach to constrain the
constitutive relations, namely by demanding the existence of an equilibrium
partition function (or free energy) for the fluid as described in \cite{Banerjee:2012iz,Jain:2012rh}
\footnote{For similar discussions, see for example
\cite{Jensen:2012jh,Jensen:2012jy}.}.
Let us keep the fluid in a special background such that the background metric has a
timelike Killing vector and the background gauge field is time independent.
Any such metric can be put into the following Kaluza-Klein form
\begin{equation}
\begin{split}
ds^2 &= -e^{2\sigma}(dt+a_idx^i)^2 + g_{ij}dx^idx^j, \\
\hat{\cal A} &= {\cal A}_0dt + {\cal A}_idx^i
\end{split}
\label{KKform}
\end{equation}
here $i,j \in \{1,2,\ldots, 2n-1\}$ are the spatial indices. We will often use the
notation $\gamma\equiv e^{-\sigma}$ for brevity. This background has a timelike
Killing vector $\partial_t$ and let $u_k^\mu=(e^{-\sigma},0,0,\ldots)$ be the
unit normalized vector in the Killing direction so that
\[ u_k^\mu\partial_\mu = \gamma \partial_t \quad\text{and}\quad u_k= -\gamma^{-1}(dt+a) \]
In the corresponding Euclidean field theory description of equilibrium, the imaginary
time direction would be compactified into a thermal circle with the
size of circle being the inverse temperature of the underlying
field theory. In the $(2n-1)$-dimensional compactified geometry, the original $2n$-dimensional background fields
break up as follows
\begin{itemize}
\item metric($g_{\mu\nu}$) : scalar($\sigma$), KK gauge field($a_i$), lower dimensional metric($g_{ij}$).
\item gauge field($\hat{\cal A}_\mu$) : scalar(${\cal A}_0$), gauge field(${\cal A}_i$)
\end{itemize}
Under this KK type reduction the $2n$-dimensional diffeomorphisms break up into
$(2n-1)$-dimensional diffeomorphisms and KK gauge transformations. The components
of $2n$-dimensional tensors which are KK-gauge invariant in $2n-1$ dimensions are those with lower
time (Killing direction) and upper space indices. Given a 1-form $J$
we will split it in terms of KK-invariant components as
\[ J=J_0 (dt+a_i dx^i)+ g_{ij} J^i dx^j \]
Other KK non-invariant components of $J$ are given by
\begin{equation}
\begin{split}
J^0 &= -\brk{\gamma^2 J_0+a_i J^i}\\
J_i &= g_{ij}J^j +a_i J_0
\end{split}
\end{equation}
To take care of KK gauge invariance we will identify the lower dimensional U(1)
gauge field (denoted by non script letters) as follows
\begin{equation}
\begin{split}
A_0 &= {\cal A}_0+\mu_0 , ~~A^i = {\cal A}^i \\
\Rightarrow A_i &= {\cal A}_i - {\cal A}_0 a_i {\rm ~~~and}\\
F_{ij} &= \partial_i A_j - \partial_j A_i = \mathcal{F}_{ij}
- A_0 f_{ij} -(\partial_i A_0~a_j - \partial_j A_0~a_i).
\end{split}
\label{KKinv}
\end{equation}
where $f_{ij}\equiv \partial_i a_j - \partial_j a_i$ and $\mu_0$ is a convenient
constant shift in ${\cal A}_0$ which we will define shortly. We can hence write
\[\hat{\mathcal{A}} = \mathcal{A}_0 dt + \mathcal{A} = A_0 (dt+a_i dx^i) + A_i dx^i-\mu_0 dt \]
We are now working in a general gauge - often it is useful to
work in a specific class of gauges : one class of gauges we will work in is obtained from this generic gauge by
performing a gauge transformation to remove the $\mu_0 dt$ piece. We will call this class
of gauges the `zero $\mu_0$' gauges. In these gauges the new gauge field is given in terms of the
old gauge field via
\[ \hat{\mathcal{A}}_{\mu_0=0} \equiv \hat{\mathcal{A}}+\mu_0 dt \]
We will quote all our consistent currents in this gauge. The field strength 2-form can then be written as
\[ \mathcal{F}\equiv d\hat{\mathcal{A}} = dA+ A_0 da + dA_0\wedge(dt+a) \]
We will now focus our attention on the \textbf{consistent} equilibrium partition function
which is the Euclidean path-integral computed on space adjoined with a thermal circle of
length $1/T_0$. We will further turn on a chemical potential $\mu$ - since there are various
different notions of charge in anomalous theories placed in gauge backgrounds we need to
carefully define which of these notions we use to define the partition function\footnote{
See, for example, section\S 3 of \cite{Landsteiner:2011tf} for a discussion of some of the
subtleties.}. While in the previous subsection we used the chemical potential for a
\textbf{covariant} charge and the corresponding \textbf{covariant} Gibbs free-energy
following \cite{Loganayagam:2011mu} , in this subsection we will follow \cite{Banerjee:2012iz}
in using a chemical potential for the consistent charge to define the partition
function. This distinction has to be kept in mind while making a comparison between the
two formalisms as we will elaborate later in section\S\ref{sec:IntByParts}.
The consistent partition function $Z_{Consistent}$ that we write down will be
the most general one consistent with 2n-1 dimensional diffeomorphisms,
KK gauge invariance and the U(1) gauge invariance up to anomaly.
It is a scalar $S$ constructed out of various background quantities and their derivatives.
The most generic form of the partition function is
\begin{equation}\label{parform}
W=\ln Z_{Consistent}= \int d^{2n-1}x \sqrt{g_{2n-1}} S(\sigma, A_0,a_i,A_i,g_{ij}) .
\end{equation}
Given this partition function, we compute various components of the stress tensor and
charged current from it. The KK gauge invariant components of the stress tensor
$T_{\mu\nu}$ and charge current $J_{\mu}$ can then be
obtained from the partition function as follows \cite{Banerjee:2012iz},
\begin{equation}\label{parstcu}
\begin{split}
T_{00} &= -\frac{T_0 e^{2 \sigma}}{\sqrt{-g_{2n}} }\frac{\delta W}{\delta \sigma},~~
J_0^{Consistent} = -\frac{e^{2\sigma} T_0}{\sqrt{-g_{2n}}}\frac{\delta W}{\delta A_0}, \\
T_0^i &= \frac{T_0}{\sqrt{-g_{2n}} }\bigg(\frac{\delta W}{\delta a_i}
- A_0 \frac{\delta W}{\delta A_i}\bigg),~~
J^i_{Consistent} = \frac{T_0}{\sqrt{-g_{2n}}}\frac{\delta W}{\delta A_i}, \\~~
T^{ij} &= -\frac{2 T_0}{\sqrt {-g_{2n}}} g^{il}g^{jm}\frac{\delta W}
{\delta g^{lm}}. \\
\end{split}
\end{equation}
here $\{ \sigma, a_i, g_{ij}, A_0, A_i \}$ are chosen independent sources, so the partial
derivative w.r.t any of them in the above equations means that others are kept constant.
We will sometimes use the above equation written in terms of differential forms - we will
refer the reader to appendix~\ref{app:variationForms} for the differential-form version
of the above equations.
Next
we parameterize the most generic equilibrium solution and constitutive relations for the fluid as,
\begin{eqnarray}\label{flustcu}
&& u(x)= u_0(x)+u_1(x) , \quad T(x)= T_0(x)+T_1(x) , \quad \mu(x)= \mu_0(x)+\mu_1(x) , \nonumber \\
&& T_{\mu\nu}=(\epsilon + p)u_{\mu}u_{\nu} + p g_{\mu\nu}+\pi_{\mu\nu} , \quad J^{\mu}=
q u^{\mu}+ j^{\mu}_{diss},
\end{eqnarray}
where, ${u_1,T_1,\mu_1,\pi_{\mu\nu},j^{\mu}_{diss}}$ are various derivatives of the background
quantities. Note that we will work in Landau frame throughout.
These corrections are found by comparing the fluid stress tensor
$T_{\mu\nu}$ and current $J_{\mu}$ in Eqn.\eqref{flustcu} with $T_{\mu\nu}$ and $J_{\mu}$
in Eqn.\eqref{parstcu} as obtained from the partition function. This exercise
then constrains various non-dissipative coefficients that appear in the
constitutive relations in Eqn.\eqref{flustcu}.
This then ends our short review of the formalism developed in \cite{Banerjee:2012iz}.
In the next section we will apply this formalism to a theory with $U(1)^{n+1}$
anomaly in $d=2n$ space time dimensions.
\section{Anomalous partition function in arbitrary dimensions}\label{sec:2ndimu1}
Let us consider then a fluid in a $2n$ dimensional space time. The fluid is
charged under a single $ U(1)$ abelian gauge field ${\cal A}_{\mu}$. We will
generalise to multiple abelian gauge fields later in section \S\ref{sec:2ndimmul}
and leave the non-abelian case for future study. We will continue to use the
notation in the subsection \S\S\ref{subsec:LogaReview}.
The consistent/covariant anomaly are then given by Eqn.\eqref{eq:dJ} which can
be written in components as
\begin{equation}
\begin{split}
\nabla_{\mu}J^{\mu}_{Consistent} &= \mathcal{C}_{anom}
\varepsilon^{\mu_1 \nu_1 \ldots\mu_n\nu_n}\partial_{\mu_1}\hat{\mathcal{A}}_{\nu_1}\ldots\partial_{\mu_n}\hat{\mathcal{A}}_{\nu_n}\\
&= \frac{\mathcal{C}_{anom}}{2^n} \varepsilon^{\mu_1 \nu_1 \ldots\mu_n\nu_n} \mathcal{F}_{\mu_1 \nu_1} \ldots \mathcal{F}_{\mu_n \nu_n}.\\
\nabla_{\mu}J^{\mu}_{Cov} &= (n+1) \mathcal{C}_{anom}
\varepsilon^{\mu_1 \nu_1 \ldots\mu_n\nu_n}\partial_{\mu_1}\hat{\mathcal{A}}_{\nu_1}\ldots\partial_{\mu_n}\hat{\mathcal{A}}_{\nu_n}\\
&= (n+1) \frac{\mathcal{C}_{anom}}{2^n} \varepsilon^{\mu_1 \nu_1 \ldots\mu_n\nu_n} \mathcal{F}_{\mu_1 \nu_1} \ldots \mathcal{F}_{\mu_n \nu_n}.
\end{split}
\end{equation}
and Eqn.\eqref{eq:shift} becomes
\begin{equation}\label{covcur}
J_{Cov}^{\mu} = J^{\mu}_{Consistent} + J^{\mu}_{(c)}.
\end{equation}
where
\begin{equation}\label{curcor}
\begin{split}
J^{\lambda}_{(c)} &= n\mathcal{C}_{anom} \varepsilon^{\lambda \alpha\mu_1 \nu_1 \ldots\mu_{n-1}\nu_{n-1}}
\hat{\mathcal{A}}_{\alpha} \partial_{\mu_1}\hat{\mathcal{A}}_{\nu_1}\ldots\partial_{\mu_{n-1}}\hat{\mathcal{A}}_{\nu_{n-1}}\\
&=n\frac{\mathcal{C}_{anom}}{2^{n-1}} \varepsilon^{\lambda \alpha\mu_1 \nu_1 \ldots\mu_{n-1}\nu_{n-1}}
\hat{\mathcal{A}}_{\alpha} \mathcal{F}_{\mu_1 \nu_1} \ldots \mathcal{F}_{\mu_{n-1} \nu_{n-1}}.
\end{split}
\end{equation}
The energy-momentum equation becomes
\begin{equation}
\nabla_{\mu}T^{\mu}_{\nu}= F_{\nu \mu}J^{\mu}_{Cov} ,
\end{equation}
where $J^{\mu}_{Cov}$ is the covariant current. This has been
explicitly shown in \cite{Banerjee:2012iz} \footnote{One required identity
is,
\[ \hat{\mathcal{A}}_{\alpha}
\varepsilon^{\mu_1 \nu_1 \ldots\mu_n\nu_n} \mathcal{F}_{\mu_1 \nu_1} \ldots \mathcal{F}_{\mu_n \nu_n}
= 2n\ \hat{\mathcal{A}}_{\mu_1}
\varepsilon^{\mu_1 \nu_1 \mu_2\nu_2 \ldots\mu_n\nu_n} \mathcal{F}_{\alpha \nu_1} \mathcal{F}_{\mu_2 \nu_2}\ldots \mathcal{F}_{\mu_n \nu_n}
\]
for arbitrary $2n-$dimensions}.
\subsection{Constraining the partition function}
We want to write the equilibrium free energy functional for the fluid. For this purpose,
let us keep the fluid in the following $2n$-dimensional time independent background,
\begin{eqnarray}\label{backgr}
ds^2= - e^{2 \sigma}(dt+ a_i dx^i)^2+ g_{ij}dx^idx^j, \quad {\cal A}= (A_0, {\cal A}_i).
\end{eqnarray}
Now, we write the $(2n-1)$ dimensional equilibrium free energy that reproduces the same
anomaly as given in \eqref{anomeq}. The most generic form for the anomalous part of
the partition function is,
\begin{equation}\label{action}
\begin{split}
W_{anom}&=\frac{1}{T_0}\int d^{2n-1}x \sqrt{g_{2n-1}}\bigg\{ \sum_{m=1}^{n}\alpha_{m-1}(A_0,T_0)\
\brk{\epsilon A (da)^{m-1}(dA)^{n-m}} \bigg.\\
&\qquad \bigg.\qquad + \alpha_{n}(T_0)\ \brk{\epsilon a (da)^{n-1}} \bigg\}.
\end{split}
\end{equation}
where, $\epsilon^{ijk\ldots }$ is the $(2n-1)$ dimensional tensor density defined via
\[ \epsilon^{i_1i_2\ldots i_{d-1}} = e^{-\sigma}\varepsilon^{0i_1i_2\ldots i_{d-1}} \]
The indices $(i,j)$ run over $(2n-1)$ values. We have used the following notation
for the sake of brevity
\begin{equation}\label{epsDef}
\begin{split}
&\brk{\epsilon A (da)^{m-1}(dA)^{n-m}} \\
&\quad \equiv \epsilon^{i j_1k_1 \ldots j_{m-1} k_{m-1} p_1 q_1\ldots p_{n-m}q_{n-m}}
A_i \partial_{j_1} a_{k_1}\ldots \partial_{j_{m-1}} a_{k_{m-1}}\partial_{p_1} A_{q_1}\ldots \partial_{p_{n-m}} A_{q_{n-m}}\\
&\brk{\epsilon (da)^{m-1}(dA)^{n-m}}^i \\
&\quad \equiv \epsilon^{i j_1k_1 \ldots j_{m-1} k_{m-1} p_1 q_1\ldots p_{n-m}q_{n-m}}
\partial_{j_1} a_{k_1}\ldots \partial_{j_{m-1}} a_{k_{m-1}}\partial_{p_1} A_{q_1}\ldots \partial_{p_{n-m}} A_{q_{n-m}}\\
\end{split}
\end{equation}
The invariance under diffeomorphism implies that $\alpha_{n}$ is a
constant in space. For $m<n$, however, $\alpha_m$ can have $A_0$ dependence, as the gauge
symmetry is anomalous, but they are independent of $\sigma$, due to diffeomorphism invariance.
The consistent current computed from this partition function is,
\begin{equation}
\begin{split}
\prn{J_{anom}}_0^{Consistent} &=- e^{\sigma}\sum_{m=1}^{n} \frac{\partial\alpha_{m-1}}{\partial A_0}\brk{\epsilon A (da)^{m-1}
(dA)^{n-m}} \\
\prn{J_{anom}}^i_{Consistent} &=e^{-\sigma} \bigg\{\sum_{m=1}^{n} (n-m+1) \alpha_{m-1}
\brk{\epsilon(da)^{m-1}(dA)^{n-m}}^i \bigg. \\
&\bigg. - \sum_{m=1}^{n-1} (n-m) \frac{\partial\alpha_{m-1}}{\partial A_0} \brk{\epsilon A dA_0(da)^{m-1}(dA)^{n-m-1}}^i \bigg\}
\end{split}
\end{equation}
Next, we compute the covariant currents, following \eqref{covcur}. The correction piece for the 0-component of the current is,
\begin{equation}
(J_{(c)})_0=-n\mathcal{C}_{anom} e^{\sigma}
\sum_{m=1}^{n} A_0^{m}\binom{n-1}{m-1} \brk{\epsilon A (da)^{m-1}
(dA)^{n-m}}
\end{equation}
where, we have used the following identification for $2n$ dimensional gauge field
${\cal A}_{\mu}$ and $(2n-1)$ dimensional gauge fields $A_i, a_i$ and scalar $A_0$,
\begin{equation}
\begin{split}
\mathcal A_i &= A_i + a_i A_0 \\
\mathcal A_0 &= A_0.
\end{split}
\end{equation}
where we are working in a `zero $\mu_0$' gauge.
Thus, the 0-component of the covariant current is,
\begin{equation}
\prn{J_{anom}}_0^{Cov}= -e^{\sigma} \epsilon^{ijkl\ldots}\sum_{m=1}^{n} \brk{ \frac{\partial\alpha_{m-1}}{\partial A_0} +
n\binom{n-1}{m-1} A_0^{m-1} \mathcal{C}_{anom} }
\brk{\epsilon A (da)^{m-1}(dA)^{n-m}}.
\end{equation}
Every term in the above sum is gauge non-invariant. So the covariance of the covariant current
demands that we choose the arbitrary functions $\alpha_m$ appearing in the partition function \eqref{action}
such that the current vanishes. Thus, we get,
\begin{equation}
\frac{\partial\alpha_{m-1}}{\partial A_0} +
n\binom{n-1}{m-1} A_0^{m-1} \mathcal{C}_{anom} =0 .
\end{equation}
The solution for the above equation is,
\begin{eqnarray}\label{Csol}
&&\alpha_m = - \mathcal{C}_{anom}\binom{n}{m+1} A_0^{m+1} + \tilde{C}_m T_0^{m+1} ,
\quad m=0, \ldots, n-1 \nonumber\\
&&\alpha_n = \tilde{C}_n T_0^{n+1}
\end{eqnarray}
Here, $\tilde C_m$ are constants that can appear in the partition function.
Thus, at this point, a total of $n+1$ coefficients can appear in the partition function.
A further study of CPT invariance of the partition function will reduce this number. We will present that analysis later
in detail and here we just state the result. CPT forces all $\tilde{C}_{2k} =0$.
For even $n$, the number of constants are $\frac{n}{2}$ where as for odd $n$,
the number is $(\frac{n+1}{2})$.
\subsection{Currents from the partition function}
With these functions the $i-$component of the covariant current is,
\begin{equation}\label{cufp}
\begin{split}
\prn{J_{anom}}_{Cov}^i
&= e^{-\sigma} \sum_{m=1}^{n} \bigg[A_0 \frac{\partial\alpha_{m-1}}{\partial A_0} +(n-m+1)\alpha_{m-1} \bigg]\brk{\epsilon (da)^{m-1} (dA)^{n-m}}^i \\
&= e^{-\sigma} \sum_{m=1}^{n} \bigg[-(n+1) \mathcal{C}_{anom}
\binom{n}{m} T_0 A_0^{m}\bigg.\\
&\qquad \bigg. \qquad +(n-m+1)T_{0}^{m}\tilde C_{m-1} \bigg]\brk{\epsilon (da)^{m-1} (dA)^{n-m}}^i ,
\end{split}
\end{equation}
As expected, this current is $U(1)$ gauge invariant. The different components of stress-tensor
computed from the partition function are,
\begin{equation}\label{stfp}
\begin{split}
T^{anom}_{00}&=0, \qquad T_{anom}^{ij}=0 \\
\prn{T^i_0}_{anom} &= e^{-\sigma} \sum_{m=1}^{n}\prn{ m \alpha_{m} - (n-m+1) A_0
\alpha_{m-1}} \brk{\epsilon (da)^{m-1} (dA)^{n-m}}^i\\
&= e^{-\sigma} \sum_{m=1}^{n}\left[m \tilde{C}_m T_0^{m+1}-(n+1-m) \tilde{C}_{m-1} T_0^m A_0\right.\\
&\qquad\left.\qquad+\binom{n+1}{m+1}\mathcal{C}_{anom}A_0^{m+1}\right]\brk{\epsilon (da)^{m-1} (dA)^{n-m}}^i\\
\end{split}
\end{equation}
\subsection{Comparison with Hydrodynamics}
Next, we find the equilibrium solution for the fluid variables. As usual, we keep the fluid in
the time independent background \eqref{backgr}. The equilibrium solutions for perfect charged
fluid (without any dissipation) are,
\begin{equation}
u^{\mu}\partial_\mu= e^{-\sigma}\partial_t , \quad T=T_0 e^{-\sigma}, \quad \mu= A_0 e^{-\sigma} .
\end{equation}
The most generic constitutive relations for the fluid can be written as,
\begin{eqnarray}\label{conscor}
T_{\mu\nu}&=& (\epsilon+p) u_{\mu}u_{\nu} +p g_{\mu\nu} + \eta \sigma_{\mu\nu}+ \zeta \Theta
{\cal P}_{\mu\nu} \nonumber \\
J^{\mu}_{Cov}&=& q u^{\mu}+ J^{\mu}_{even} +J^{\mu}_{odd} ,\nonumber \\
J^{\mu}_{even}&=&\sigma (E^{\mu}- T {\cal P}^{\mu\alpha} \partial_{\alpha}\nu )+
\alpha_1 E^{\mu} + \alpha_2 T {\cal P}^{\mu\alpha} \partial_{\alpha}\nu +
\mbox{higher derivative terms}\nonumber \\
J^{\mu}_{odd}&=& \sum_{m=1}^{n} \xi_m \varepsilon^{\mu \nu\ \gamma_1\delta_1\ldots\gamma_{m-1}\delta_{m-1}\ \alpha_1\beta_1 \ldots\alpha_{n-m}\beta_{n-m}}
u_{\nu}(\partial_{\gamma}u_{\delta})^{m-1}(\partial_{\alpha}{\cal A}_{\beta})^{n-m}+\ldots.
\end{eqnarray}
Here, $J^{\mu}_{even}$ is parity even part of the charge current and
$J^{\mu}_{odd}$ is parity odd charge current. $\varepsilon^{\mu \nu \alpha
\beta \gamma \delta \ldots}$ is a $2n$ dimensional tensor density whose $(n-m)$ indices
are contracted with $\partial_{\alpha}{\cal A}_{\beta}$ and $(m-1)$ indices are contracted with
$\partial_{\gamma}u_{\delta}$.
We notice that the higher derivative part of the current gets contribution from both parity even and
odd vectors. Parity even vectors can be at any derivative order but parity odd vectors
always appear at $(n-1)$ derivative order. Thus, for a generic value of $n$ (other than $n=2$),
the parity even and odd corrections to the current will always appear at
different derivative orders. From now on, we will only concentrate on the parity odd sector.
It is also straightforward to check that $J_{0}^{odd}=0$.
Next, we look for the equilibrium solution for this fluid. Since, there exist no
gauge invariant parity odd scalar, the temperature and chemical potential do not get any correction.
Also, in $2n$ dimensional theory, the parity odd vectors that we can write are always $(n-1)$
derivative terms. No other parity odd vector at any lower derivative order exists. Since the
fluid velocity is always normalized to unity, we have,
\begin{equation}
\delta T=0, \quad \delta \mu=0, \quad \delta u_0 = - a_i \delta u^i.
\end{equation}
where, the most generic correction to the fluid velocity is,
\begin{equation}
\delta u^i = \sum_{m=1}^{n} U_m(\sigma, A_0) \brk{\epsilon(da)^{m-1} (dA)^{n-m}}^i .
\end{equation}
Here, $U_m(\sigma, A_0)$ are arbitrary coefficients and factors of $e^{\sigma}$ are introduced for later convenience.
Similarly, we can parameterize the $i-$component
of the parity-odd current as,
\begin{equation}\label{jdiss}
J^{i}_{odd} = \sum_{m=1}^{n} J_m(\sigma, A_0) \brk{\epsilon(da)^{m-1} (dA)^{n-m}}^i .
\end{equation}
The coefficients $J_m(\sigma, A_0)$ are related to the transport coefficients $\xi_m$ via
\begin{equation}\label{eq:jxi}
J_m= \sum_{k=1}^m \binom{n-k}{m-k} \xi_k \prn{-e^\sigma}^{k-1} A_0^{m-k} .
\end{equation}
With all these data, we can finally compute the corrections to the stress tensor and charged currents
and they take the following form,
\begin{eqnarray}\label{stc}
\delta T_{00}&=&0, \quad \delta T^{ij}=0, \quad \delta \tilde J_{0}=0 \nonumber \\
\delta T_{0}^i&=& -e^{\sigma} (\epsilon+p) \epsilon^{ijk\ldots} \sum_{m=1}^{n}
U_m(\sigma, A_0) ( da)^{m-1} (dA)^{n-m} \nonumber \\
\delta J^{i}_{Cov} &=& \epsilon^{ijk\ldots} \sum_{m=1}^{n}(J_m(\sigma, A_0) +
q U_m(\sigma, A_0))( da)^{m-1} (dA)^{n-m}
\end{eqnarray}
Comparing the expressions for various components of stress tensor and covariant current of
the fluid obtained from equilibrium partition function \eqref{stfp}, \eqref{cufp} and fluid
constitutive relations \eqref{stc}, we get,
\begin{eqnarray}\label{velocur}
U_{m} &=& - \frac{e^{-2\sigma}}{\epsilon+p}\left[m \alpha_m-(n-m+1)A_0\alpha_{m-1}\right]\nonumber\\
&=&-\frac{e^{-2\sigma}}{\epsilon+p}\left[m \tilde{C}_m T_0^{m+1}-(n+1-m) \tilde{C}_{m-1} A_0T_0^m\right.\nonumber\\
&&\qquad\left.\qquad+\binom{n+1}{m+1}\mathcal{C}_{anom}A_0^{m+1}\right]
\end{eqnarray}
Similarly, we can evaluate
$J_m(\sigma, A_0) $ as follows,
\begin{equation}\label{Jm}
\begin{split}
J_{m}&=e^{-\sigma}\brk{-(m+1)\mathcal{C}_{anom} A_0^{m}\binom{n+1}{m+1}+(n-m+1){\tilde C}_{m-1}T_0^{m}}\\
&+ \frac{qe^{-2\sigma}}{\epsilon+p}\left[m \tilde{C}_m T_0^{m+1}-(n+1-m) \tilde{C}_{m-1} A_0 T_0^m\right.\\
&\qquad\left.\qquad+\binom{n+1}{m+1}\mathcal{C}_{anom}A_0^{m+1}\right]
\end{split}
\end{equation}
We want to now use this to obtain the transport coefficients $\xi_m$ in the last relation of \eqref{conscor}.
For this we have to invert the relations \eqref{eq:jxi} for $\xi_m$. We finally get
\begin{equation}\label{explicitform2n}
\begin{split}
\xi_m &= \brk{m \frac{q\mu}{\epsilon + p}-(m+1)}\mathcal{C}_{anom}\binom{n+1}{m+1} \mu^m\\
&\qquad+\sum_{k=0}^{m}\brk{m \frac{q\mu}{\epsilon + p}-(m-k)}(-1)^{k-1}{\tilde C}_{k}\binom{n-k}{m-k} T^{k+1}\mu^{m-k-1} \\
\end{split}
\end{equation}
This then is the prediction of this transport coefficient via partition function methods. This
exactly matches with the expression from \cite{Loganayagam:2011mu} in \eqref{eq:xiLoga} provided we
make the following identification among the constants $\tilde{C}_m= (-1)^{m-1} C_m$.
\section{Comments on Most Generic Entropy Current}\label{sec:entropy}
Another physical requirement which has long been used as a source of constraints
on fluid dynamical transport coefficients is the local form of second law of
thermodynamics. As we reviewed in the subsection\S\S\ref{subsec:LogaReview} this principle
had been used in \cite{Loganayagam:2011mu} to obtain anomaly induced transport
coefficients in arbitrary even dimensions.\\
In this section we will determine the entropy current in equilibrium by comparing the total
entropy with that obtained from the equilibrium partition function. In the examples studied in
\cite{Banerjee:2012iz,Jain:2012rh} it was seen that in general the comparison
with the equilibrium entropy (obtained from the partition function) did not fix
all the non dissipative coefficients in fluid dynamical entropy current. However
it did determine the anomalous contribution exactly. Here we will see that
this holds true in general even dimensions. \\
Let us begin by computing the entropy from the equilibrium partition function.
We begin with the anomalous part of the partition function
\begin{equation}\begin{split}
W_{anom} = \frac{1}{T_0}\int d^{2n-1}x &\sqrt{g_{2n-1}}\bigg\{ \sum_{m=1}^{n}
\alpha_{m-1} \brk{\epsilon A(da)^{m-1}(dA)^{n-m}}\bigg.\\
&\qquad\bigg.\qquad + \alpha_n \brk{\epsilon a(da)^{n-1}} \bigg\}
\end{split}
\end{equation}
where the functions $\alpha_m$ are given in \eqref{Csol}.
The anomalous part of the total entropy is easily computed to be
\begin{equation}\begin{split}\label{entropy1}
S_{anom} &= \frac{\partial}{\partial T_0} \prn{ T_0 W_{anom} } \\
&= \int d^{2n-1}x \sqrt{g_{2n-1}} \bigg\{ \sum_{m=1}^{n}
m~T_0^{m-1} ~\tilde{C}_{m-1}\ \brk{\epsilon A(da)^{m-1}(dA)^{n-m}} \\
& \quad + ~(n+1) \tilde{C}_n ~T_0^n \brk{\epsilon a(da)^{n-1}} \bigg\}\\
&= \int d^{2n-1}x \sqrt{g_{2n-1}} \bigg\{ \sum_{m=1}^{n}
(m+1) ~T_0^m ~\tilde{C}_m\ \brk{\epsilon a(da)^{m-1}(dA)^{n-m}} \\
& \qquad + \tilde{C}_0 \brk{\epsilon A(dA)^{n-1}} \bigg\}
\end{split}
\end{equation}
Now we will determine the most general form of entropy current in equilibrium by
comparison with \eqref{entropy1}. In \cite{Banerjee:2012iz} it was argued that the entropy
current by itself is not a physical object, but entropy production and total entropy
are. This gave a window for gauge non invariant contribution to entropy current but
the contribution was removed by CPT invariance. Here also we will allow for such
gauge non invariant terms in the entropy current. The most general form of entropy
current, allowing for gauge non invariant pieces, is then
\begin{equation}
\begin{split}
J^\mu_S &= s u^\mu -\frac{\mu}{T} J^{\mu}_{odd} + \sum_{m=1}^{n} \chi_m \varepsilon^{\mu\nu\ldots} u_\nu (\partial u)^{m-1}
(\partial\hat{\mathcal{A}})^{n-m}\\
&\qquad + \zeta \varepsilon^{\mu\nu\ldots} \hat{\mathcal{A}}_\nu (\partial\hat{\mathcal{A}})^{n-1}
\end{split}
\end{equation}
where $\chi_m$ is a function of $T$ and $\mu$, whereas $\zeta$ is a constant.
The correction to the local entropy density (i.e., the time component of the entropy current) can be written
after an integration by parts as
\begin{equation}\label{0entcur}\begin{split}
\delta J_S^0 = \varepsilon^{0ij\ldots}\brk{ \zeta A(dA)^{n-1} + \sum_{k=1}^{n} {\tilde f}_k\ a\ (da)^{k-1}\ (dA)^{n-k} }_{ij\ldots} +\text{total derivatives}
\end{split}
\end{equation}
where
\begin{equation}
\begin{split}
{\tilde f}_m &\equiv - s U_m +\frac{\mu}{T} J_m+\zeta A_0^m \binom{n}{m}+\sum_{k=1}^{m} \binom{n-k}{m-k}\chi_k \prn{-e^{\sigma}}^k A_0^{m-k}\\
\end{split}
\end{equation}
The correction to the entropy is then,
\begin{equation}
\begin{split}\label{entropy2}
\delta S &= \int d^{2n-1}x \sqrt{g_{2n}} ~J^0_S \\
& = \int d^{2n-1}x \sqrt{g_{2n-1}}
\brk{ \zeta \brk{\epsilon A(dA)^{n-1}} + \sum_{m=1}^{n} {\tilde f}_m\ \brk{\epsilon a\ (da)^{m-1}\ (dA)^{n-m}} }
\end{split}
\end{equation}
Comparing the two expressions of total equilibrium entropy \eqref{entropy1} and
\eqref{entropy2} we find the following expressions of the various coefficients in
the entropy current \eqref{0entcur},
\begin{eqnarray}\label{resultec}
\zeta &=& {\tilde C}_0 \quad\text{and}\quad \tilde{f}_k = (k+1) ~T_0^{k} ~{\tilde C}_{k} {\rm ~~for~~} 0 \leq k \leq n
\end{eqnarray}
This in turn implies that
\begin{equation}
\begin{split}
T_0\sum_{k=1}^{m} &\binom{n-k}{m-k}\chi_k \prn{-e^{\sigma}}^k A_0^{m-k}\\
&= \tilde{C}_mT_0^{m+1}+m\binom{n}{m}\mathcal{C}_{anom}A_0^{m+1}-\tilde{C}_0 T_0A_0^m \binom{n}{m}
\end{split}
\end{equation}
which can be inverted to give
\begin{equation}
\begin{split}
\chi_m &= - \mathcal{C}_{anom}\binom{n+1}{m+1} T^{-1}\mu^{m+1}-\sum_{k=0}^{m}\tilde{C}_k (-1)^{k-1}\binom{n-k}{m-k} T^k\mu^{m-k} \\
\zeta &= {\tilde C}_0 \\
\end{split}
\end{equation}
which matches with the prediction from \cite{Loganayagam:2011mu} in equation \eqref{eq:Chi_mPrediction}
again with the identification $C_m(-1)^{m-1} = \tilde{C}_m$. We see that in the entropy current
we have a total of $n+1$ constants as in the equilibrium partition function.
This completes our partition function analysis and our re-derivation of the results of \cite{Loganayagam:2011mu}
via partition function techniques. We see that the transport coefficients match exactly with the results
obtained via entropy current (provided the analysis of \cite{Loganayagam:2011mu} is extended by allowing
gauge-non-invariant pieces in the entropy current). This detailed match of transport coefficients
warrants the question whether the form of the equilibrium partition function itself
can be directly derived from the expressions of \cite{Loganayagam:2011mu}
quoted in \ref{subsec:LogaReview}. We turn to this question in the next section.
\section{Gibbs current and Partition function}\label{sec:IntByParts}
We begin by repeating the expression for the Gibbs current in \eqref{eq:GCovBOmega} which was central to the results of
\cite{Loganayagam:2011mu}.
\begin{equation}
\begin{split}
\bar{\mathcal{G}}^{Cov}_{anom}
&= C_0 T \hat{\mathcal{A}}\wedge\mathcal{F}^{n-1}+ \sum_{m=1}^{n}\left[\mathcal{C}_{anom}\binom{n+1}{m+1}\mu^{m+1}\right.\\
&\qquad \left. + \sum_{k=0}^{m}C_k \binom{n-k}{m-k} T^{k+1}\mu^{m-k}\right] (2\omega)^{m-1} \mathcal{B}^{n-m}\wedge u \\
\end{split}
\end{equation}
The subscript `anom' denotes that we are considering only a part of the entropy current relevant to anomalies.
The superscript `Cov' refers to the fact that this is the Gibbs free energy computed by turning on a chemical
potential for the \textbf{covariant} charge.
Let us ask how this expression would be modified if the Gibbs free energy was computed by turning on a
chemical potential for the \textbf{consistent} charge instead. The change from covariant charge to
consistent charge/current is simply given by a shift, as in equation~\eqref{eq:shift}.
This shift does not depend on the state of the theory but is purely a functional of the
background gauge fields. Thinking of Gibbs free energy as minus temperature
times the logarithm of the Euclidean path integral, a conversion from covariant
charge to a consistent charge induces a shift
\[ \bar{\mathcal{G}}^{Cov}_{anom} = \bar{\mathcal{G}}^{Consistent}_{anom} - \mu\ n\ \mathcal{C}_{anom}\hat{\mathcal{A}}\wedge \mathcal{F}^{n-1}\]
which gives
\begin{equation}\label{eq:GConsBOmega}
\begin{split}
&\bar{\mathcal{G}}^{Consistent}_{anom} \\
&= \sum_{m=1}^{n}\left[\mathcal{C}_{anom}\binom{n+1}{m+1}\mu^{m+1} +\sum_{k=0}^{m}C_k \binom{n-k}{m-k} T^{k+1}\mu^{m-k}\right]
(2\omega)^{m-1} \mathcal{B}^{n-m}\wedge u \\
&\qquad + \brk{C_0 T + n\mathcal{C}_{anom}\mu } \hat{\mathcal{A}}\wedge\mathcal{F}^{n-1}\\
\end{split}
\end{equation}
This is now a Gibbs current whose $\mu$ derivative gives the consistent current rather than the covariant current. It is
easy to check that this solves an adiabaticity equation very similar to the one quoted in equation~\eqref{eq:adiabG}
\begin{equation}\label{eq:adiabGCons}
\begin{split}
d\bar{\mathcal{G}}^{Consistent}_{anom} &+ \mathfrak{a} \wedge \bar{\mathcal{G}}^{Consistent}_{anom}+n\mathcal{C}_{anom}\prn{\hat{\mathcal{A}}+\mu u}\wedge\mathcal{E}\wedge \mathcal{B}^{n-1}\\
&= \prn{dT+\mathfrak{a}T}\wedge \frac{\partial\bar{\mathcal{G}}^{Consistent}_{anom}}{\partial T}
+ \prn{d\mu+\mathfrak{a}\mu-\mathcal{E}}\wedge \frac{\partial\bar{\mathcal{G}}^{Consistent}_{anom}}{\partial \mu}
\end{split}
\end{equation}
The question we wanted to address is how this Gibbs current is related to the partition function in
equation \eqref{action}.
The answer turns out to be quite intuitive - we would like to argue in this section that
\begin{equation}\label{eq:ZGibbs}
W_{anom} = \ln\ Z^{anom}_{Consistent} = - \int_{space}\frac{1}{T} \bar{\mathcal{G}}^{Consistent}_{anom}
\end{equation}
This equation instructs us to pull back the $2n-1$ form in equation \eqref{eq:GConsBOmega} (divided by local temperature)
and integrate it on an arbitrary spatial hyperslice to obtain the anomalous contribution to
negative logarithm of the equilibrium path integral. Note that pulling back the Hodge dual of
Gibbs current on a spatial hyperslice is essentially equivalent to integrating its zero
component (i.e., the Gibbs density) on the slice. Seen this way the above relation is
the familiar statement relating Gibbs free energy to the grand-canonical partition function.
\subsection{Reproducing the Gauge variation}
Before giving an explicit proof of the relation~\eqref{eq:ZGibbs} we will check in this
subsection that the relation~\eqref{eq:ZGibbs} essentially gives the correct gauge variation of the path-integral
at equilibrium. This will provide us with a clearer insight on how the program of \cite{Banerjee:2012iz} to
write a local expression in the partition function to reproduce the anomaly works.
The gauge variation of~\eqref{eq:ZGibbs} under $\delta\hat{\mathcal{A}}=d\delta\lambda$ is
\begin{equation}
\begin{split}
\delta W_{anom} &= \delta \ln\ Z^{anom}_{Consistent} = - \int_{space}\frac{1}{T} \delta\bar{\mathcal{G}}^{Consistent}_{anom}\\
&= -\int_{space}\brk{C_0 + n\mathcal{C}_{anom}\frac{\mu}{T} } \delta\hat{\mathcal{A}}\wedge\mathcal{F}^{n-1}\\
&= -\int_{space}\brk{C_0 + n\mathcal{C}_{anom}\frac{\mu}{T} } d\delta\lambda\wedge\mathcal{F}^{n-1}\\
&= -\int_{surface}\delta\lambda\brk{C_0 + n\mathcal{C}_{anom}\frac{\mu}{T} } \wedge\mathcal{F}^{n-1}
+ n\mathcal{C}_{anom}\int_{space}\delta\lambda d\prn{\frac{\mu}{T}} \wedge\mathcal{F}^{n-1}
\end{split}
\end{equation}
We will now ignore the surface contribution and use the fact that chemical equilibrium demands that
\[ Td\prn{\frac{\mu}{T}} = \mathcal{E} \]
where $\mathcal{E}\equiv u^\mu\mathcal{F}_{\mu\nu}dx^\nu$ is the rest-frame electric field. This is
essentially a statement (familiar from say semiconductor physics) that in equilibrium the
diffusion current due to concentration gradients should cancel the drift ohmic current due to the
electric field. Putting this in along with the electric-magnetic decomposition
$\mathcal{F}=\mathcal{B}+u\wedge \mathcal{E}$, we get
\begin{equation}
\begin{split}
\delta W_{anom} &= \delta \ln\ Z^{anom}_{Consistent} = \mathcal{C}_{anom}\int_{space}\frac{\delta\lambda}{T} n\mathcal{E}\wedge\mathcal{B}^{n-1}
\end{split}
\end{equation}
which is the correct anomalous variation required of the equilibrium path-integral!
In $d=2n=4$ dimensions for example we get the correct $E.B$ variation along with the
$1/T$ factor coming from the integration over the Euclidean time circle. The factor of $n$
comes from converting to electric and magnetic fields
\[ \mathcal{F}^n = n\ u\wedge \mathcal{E}\wedge\mathcal{B}^{n-1} \]
Thus the shift piece along with the chemical equilibrium conspires to reproduce the
correct gauge variation. The reader might wonder why this trick cannot be made to work
by just keeping the shift term alone in the Gibbs current - the answer is of course
that other terms are required if one insists on adiabaticity in the sense that
we want to solve \eqref{eq:adiabGCons}.
\subsection{Integration by parts}
In this subsection we will prove \eqref{eq:ZGibbs} explicitly. We will begin by evaluating the
consistent Gibbs current in the equilibrium configuration. We will as before
work in the `zero $\mu_0$' gauge.
Using the relations in the appendix~\ref{app:hydrostatics} we get the consistent Gibbs current as
\begin{equation}
\begin{split}
-\frac{1}{T}&\bar{\mathcal{G}}^{Consistent}_{anom}\\
&=\frac{1}{T_0}\sum_{m=1}^{n}\left[ C_m(-1)^{m-1} T_0^{m+1}-C_0(-1)^{0-1}\binom{n}{m}T_0A_0^m\right.\\
&\qquad\left. -\binom{n}{m+1}\mathcal{C}_{anom}A_0^{m+1}\right] (da)^{m-1}(dA)^{n-m}\wedge (dt+a)\\
&\quad -\frac{1}{T_0} \brk{n \mathcal{C}_{anom}A_0 +C_0T_0} A\wedge (dA+A_0 da)^{n-1}\\
&\quad -\frac{(n-1)}{T_0} \brk{n \mathcal{C}_{anom}A_0 +C_0T_0} A\wedge dA_0\wedge(dt+a)\wedge (dA+A_0 da)^{n-2}\\
\end{split}
\end{equation}
After a somewhat long set of manipulations, one arrives at the following form for the consistent Gibbs current
\begin{equation}
\begin{split}
-\frac{1}{T}&\bar{\mathcal{G}}^{Consistent}_{anom}\\
&= d\left\{\frac{A}{T_0}
\sum_{m=1}^{n-1}\left[ C_m(-1)^{m-1} T_0^{m+1}-C_0(-1)^{0-1}\binom{n-1}{m}T_0A_0^m\right.\right.\\
&\qquad\left.\left. +m\binom{n}{m+1}\mathcal{C}_{anom}A_0^{m+1}\right] (da)^{m-1}(dA)^{n-1-m}\wedge (dt+a)\right\}\\
&\quad +\frac{A}{T_0}\sum_{m=1}^{n}\brk{C_{m-1}(-1)^{m-2} T_0^m-\binom{n}{m} \mathcal{C}_{anom} A_0^m}(da)^{m-1}(dA)^{n-m}\\
&\quad+ C_n(-1)^{n-1} T_0^{n}(da)^{n-1}\wedge (dt+a)\\
\end{split}
\end{equation}
Here we have taken out a surface contribution which we will suppress from now on since it does not contribute
to the partition function. This final form is easily checked term by term and we will leave that as an exercise
to the reader.
Suppressing the surface contribution we can write
\begin{equation}
\begin{split}
-\frac{1}{T}&\bar{\mathcal{G}}^{Consistent}_{anom}\\
&= d\brk{\ldots} +\frac{A}{T_0}\sum_{m=1}^{n}\brk{C_{m-1}(-1)^{m-2} T_0^m-\binom{n}{m} \mathcal{C}_{anom} A_0^m}(da)^{m-1}(dA)^{n-m}\\
&\quad+ C_n(-1)^{n-1} T_0^{n}(da)^{n-1}\wedge (dt+a)\\
&=d\brk{\ldots} +\frac{A}{T_0}\wedge
\sum_{m=1}^{n}\alpha_{m-1}(da)^{m-1}(dA)^{n-m} + \frac{dt+a}{T_0} \wedge \alpha_n(da)^{n-1} \\
\end{split}
\end{equation}
where we have defined
\begin{equation}\label{eq:alphaC}
\begin{split}
\alpha_m &= C_{m}(-1)^{m-1} T_0^{m+1}-\binom{n}{m+1} \mathcal{C}_{anom} A_0^{m+1}\quad \text{for}\ m<n\\
\alpha_n &= C_{n}(-1)^{n-1} T_0^{n+1}\\
\end{split}
\end{equation}
To get the contribution to the equilibrium partition function,
we integrate the above equation over the spatial slice (putting $dt=0$).
We will neglect surface contributions to get
\begin{equation}
\begin{split}
&\prn{\ln \mathcal{Z}}^{Consistent}_{anom} \\
&=\int_{\text{space}}\frac{A}{T_0}\wedge
\sum_{m=1}^{n}\brk{C_{m-1}(-1)^{m-2} T_0^m-\binom{n}{m} \mathcal{C}_{anom} A_0^m}(da)^{m-1}(dA)^{n-m} \\
&\qquad + \int_{\text{space}}C_n(-1)^{n-1} T_0^n a \wedge(da)^{n-1} \\
&=\int_{\text{space}}\frac{A}{T_0}\wedge
\sum_{m=1}^{n}\alpha_{m-1}(da)^{m-1}(dA)^{n-m} + \int_{\text{space}} \frac{a}{T_0} \wedge \alpha_n(da)^{n-1} \\
\end{split}
\end{equation}
with $\alpha_{m}$s given by \eqref{eq:alphaC}. We are essentially done - we have got the form in
\eqref{action} and comparing the equations \eqref{eq:alphaC} and \eqref{Csol} we find a perfect
agreement with the usual relation $C_m(-1)^{m-1}=\tilde{C}_m$. Now by varying this
partition function we can obtain currents as before (the variation can be directly done in
form language using the equations we provide in appendix~\ref{app:variationForms}).
With this we have completed a whole circle showing that the two formalisms for anomalous transport
developed in \cite{Loganayagam:2011mu} and \cite{Banerjee:2012iz} are
completely equivalent.
Before we conclude, let us rewrite the partition function in terms of the polynomial
$\mathfrak{F}^\omega_{anom}[T,\mu]$ as
\begin{equation}
\begin{split}
&\prn{\ln \mathcal{Z}}^{Consistent}_{anom} \\
&=\int_{\text{space}}\frac{A}{T_0 da}\wedge
\brk{ \frac{ \mathfrak{F}^\omega_{anom}[-T_0 da, dA]-\mathfrak{F}^\omega_{anom}[-T_0 da, 0]}{dA}
-\frac{ \mathfrak{F}^\omega_{anom}[0, dA+A_0 da]}{dA+A_0 da}}\\
&\qquad + \int_{\text{space}}\frac{\mathfrak{F}^\omega_{anom}[-T_0 da, 0]}{(T_0da)^2}\wedge T_0 a \\
\end{split}
\end{equation}
We will consider an example. Using adiabaticity arguments, the authors of \cite{Loganayagam:2012pz}
derived the following expression for
a theory of free Weyl fermions in $d=2n$ spacetime dimensions
\begin{equation}
\begin{split}
\prn{{\mathfrak{F}}^\omega_{anom}}^{free\ Weyl}_{d=2n}&=- 2\pi\sum_{species} \chi_{_{d=2n}} \brk{\frac{\frac{\tau}{2}T}{\sin \frac{\tau}{2}T}e^{\frac{\tau}{2\pi}q\mu}}_{\tau^{n+1}} \\
\end{split}
\end{equation}
where $\chi_{_{d=2n}}$ is the chirality and the subscript $\tau^{n+1}$ denotes that one needs to Taylor-expand in $\tau$ and
retain the coefficient of $\tau^{n+1}$. Substituting this into the above expression gives the anomalous part
of the partition function of free Weyl fermions.
\section{Fluids charged under multiple $U(1)$ fields}\label{sec:2ndimmul}
In this section, we will generalize our results to cases where
we have multiple abelian $U(1)$ gauge fields in arbitrary $2n-$dimensions.
We can take
\begin{equation}\label{eq:FOmegaCmulti}
\begin{split}
\mathfrak{F}^\omega_{anom}[T,\mu] &= \mathcal{C}_{anom}^{A_1 \ldots A_{n+1}}\mu_{A_1} \ldots \mu_{A_{n+1}}+\sum_{m=0}^{n}C_m^{A_1\ldots A_{n-m}} T^{m+1}\mu_{A_1\ldots A_{n-m}}.\\
\end{split}
\end{equation}
In this case,
the anomaly equation takes the following form,
\begin{equation}\label{anomeq}
\nabla_{\mu} J^{\mu,A_{n+1}}_{Cov} =\frac{n+1}{2^n} {\cal C}_{anom}^{A_1 A_2\ldots A_{n+1}} \varepsilon^{\mu_1\nu_1\mu_2\nu_2 \ldots \mu_n\nu_n}
\prn{\mathcal{F}_{\mu_1 \nu_1}}_{A_1} \ldots \prn{\mathcal{F}_{\mu_n \nu_n}}_{A_n} .
\end{equation}
Here, in $2n$ dimensions, ${\cal C}_{anom}$ has $n+1$ indices, denoted by $(A_1, A_2, \ldots, A_{n+1})$, and it is symmetric in all its indices.
It is straightforward to carry on the above computation for the case of multiple $U(1)$
charges, and most of the computations remain the same.
Now, for the multiple $U(1)$ case, in the partition function \eqref{action} the functions $\alpha_m$ and the
constants $\tilde{C}_m$ (and the constants $C_m$ appearing in $\mathfrak{F}^\omega_{anom}$)
have $n-m$ number of indices which are contracted with $n-1-m$
number of $dA$ and one $A$. The constant $\zeta$ appearing in the entropy current has
$n$ indices.
The constant $\tilde{C}_{n}$ (and $\alpha_n$) has no index. All these constants
are symmetric in their indices. Taking the above index structure into account, we can
understand that the functions $U_m$ appearing in velocity correction and $\chi_m$ appearing in
entropy corrections have $n-m$ indices and the function $J_{m}$ appearing in the charge current
has $n-m+1$ indices. Now, we can write the generic form of these functions as follows:
\begin{equation}
\begin{split}
U_m^{A_1A_2\ldots A_{n-m}}&=-\frac{e^{-2\sigma}}{\epsilon+p} \left[ m\tilde C_{m}^{A_1A_2\ldots A_{n-m}}T_0^{m+1}\right.\\
&\qquad -(n+1-m)\tilde C_{m-1}^{A_1A_2\ldots A_{n-m}B_1}(A_0)_{B_1}T_0^m \\
&\qquad \left.
+\binom{n+1}{m+1}\mathcal{C}_{anom}^{A_{1}\ldots A_{n-m}B_{1}\ldots B_{m+1}}(A_{0})_{B_{1}}\ldots (A_0)_{B_{m+1}}\right]\\
\end{split}
\end{equation}
where $\prn{A_0}_{B_1}$ comes from the $B_1$th gauge field.
Similarly, we can write the coefficients appearing in $A$'th charge current ($J^{A}$) as,
\begin{equation}\label{transport}
\begin{split}
\prn{J^A}_m^{A_1 A_2\ldots A_{n-m}}&=e^{-\sigma}\left[- (m+1) \mathcal{C}_{anom}^{A A_1 \ldots A_{n-m}B_{1}\ldots B_m} (A_0)_{B_{1}}\ldots (A_0)_{B_{m}}\binom{n+1}{m+1}\right.\\
&\qquad \left.+ (n-m+1){\tilde C}_{m-1}^{A A_1\ldots A_{n-m}}T_0^m \right]\\
&\qquad +\frac{q^A e^{-2\sigma}}{\epsilon+p} \left[ m\tilde C_{m}^{A_1A_2\ldots A_{n-m}}T_0^{m+1}\right.\\
&\qquad -(n+1-m)\tilde C_{m-1}^{A_1A_2\ldots A_{n-m}B_1}(A_0)_{B_1}T_0^m \\
&\qquad \left.
+\binom{n+1}{m+1}\mathcal{C}_{anom}^{A_{1}\ldots A_{n-m}B_{1}\ldots B_{m+1}}(A_{0})_{B_{1}}\ldots (A_0)_{B_{m+1}}\right]\\
\end{split}
\end{equation}
We can also express the transport coefficients for fluids charged under multiple $U(1)$ charges,
generalising equation \eqref{explicitform2n} as,
\begin{equation}
\begin{split}
&\prn{\xi^A}_m^{A_1 A_2\ldots A_{n-m}} \\
&\ = \brk{m \frac{q^A\mu_B}{\epsilon + p}-(m+1)\delta^A_B}\mathcal{C}_{anom}^{BA_1\ldots A_{n-m}B_1\ldots B_m}
\binom{n+1}{m+1} \mu_{B_1}\ldots\mu_{B_m}\\
&\quad +\sum_{k=0}^{m-1}\brk{m \frac{q^A\mu_B}{\epsilon + p}-(m-k)\delta^A_B}\\
&\qquad \times (-1)^{k-1}{\tilde C}_{k}^{BA_1\ldots A_{n-m}B_1\ldots B_{m-k-1}}\binom{n-k}{m-k} T^{k+1}\mu_{B_1} \ldots \mu_{B_{m-k-1}} \\
&\quad +\brk{m \frac{q^A}{\epsilon + p}} (-1)^{m-1}{\tilde C}_{m}^{A_1\ldots A_{n-m}} T^{m+1} \\
\end{split}
\end{equation}
Similarly, the coefficients $\chi_m$ appearing in the entropy current become
\begin{equation}
\begin{split}
\chi_m^{A_{1}\ldots A_{n-m}} &= - \mathcal{C}_{anom}^{A_{1}\ldots A_{n-m} B_1\ldots B_{m+1}}\binom{n+1}{m+1} T^{-1}\mu_{B_{1}}\ldots \mu_{B_{m+1}}\\
&-\sum_{k=0}^{m}(-1)^{k-1}\binom{n-k}{m-k} T^k \tilde{C}_k^{A_{1}\ldots A_{n-m}B_1\ldots B_{m-k}} \mu_{B_{1}}\ldots \mu_{B_{m-k}}\\
\end{split}
\end{equation}
This finishes the analysis of anomalous fluid charged under multiple abelian $U(1)$ gauge fields.
\section{CPT Analysis}\label{sec:CPT}
In this section we analyze the constraints of 2n dimensional CPT invariance on the analysis
of our previous sections.
\begin{table}
\centering
\begin{tabular}[h]{|c|c|c|c|c|}
\hline
Name & Symbol & CPT \\
\hline
Temperature & $T$ & + \\
\hline
Chemical Potential & $\mu$ & - \\
\hline
Velocity 1-form & $u$ & + \\
\hline
Gauge field 1-form &$\hat{\mathcal{A}}$ & - \\
\hline
Exterior derivative & $d$ & - \\
\hline
Field strength 2-form & $\mathcal{F}=d\hat{\mathcal{A}}$ & +\\
\hline
Magnetic field 2-form & $\mathcal{B}$ & +\\
\hline
Vorticity 2-form & $\omega$ & -\\
\hline
\end{tabular}
\caption{\label{tab:CPTform}Action of CPT on various forms}
\end{table}
Let us first examine the CPT transformation of the Gibbs current proposed in
\cite{Loganayagam:2011mu}. Using Table~\ref{tab:CPTform}
we see that the Gibbs current in Eqn.\eqref{eq:GCovBOmega}
is CPT-even provided the coefficients $\{\mathcal{C}_{anom},C_{2k+1}\}$
are CPT-even and the coefficients $C_{2k}$ are CPT-odd. Since in a
CPT-invariant theory all CPT-odd coefficients should vanish, we
conclude that $C_m=0$ for even $m$. This conclusion can be phrased
as
\begin{equation} CPT\quad : \quad C_m(-1)^{m-1}= C_m \end{equation}
Note that this is the same conclusion as reached by assuming the
relation to the anomaly polynomial.
Next we analyze the constraints of 2n dimensional CPT invariance on the partition function
\eqref{action}. Our starting point is a partition function of the fluid and we expect it
to be invariant under $2n$-dimensional CPT transformations of the fields.
Table~\ref{cpttab} lists the effect of $2n$-dimensional C, P and T transformations on the various fields
appearing in the partition function \eqref{action}. Since $a_i$ is even while $A_i$ and $\partial_j$ are odd under CPT,
the term with coefficient $C_m$ picks up a factor of $(-1)^{(m+1)}$.
Thus CPT invariance tells us that $C_m$ must be
\begin{itemize}
\item even function of $A_0$ for odd $m$.
\item odd function of $A_0$ for even $m$.
\end{itemize}
Now the coefficients $C_m$ are fixed upto constants $\tilde{C}_m$ by the requirement that
the partition function reproduces the correct anomaly. Note that the $A_0$(odd under CPT) dependence of
the coefficients $C_m$ thus determined are consistent with the requirement CPT invariance.
Further, CPT invariance forces $\tilde{C}_m = 0$ for even $m$. The last term in the partition function
\eqref{action} is odd under parity and thus its coefficient is set to zero by CPT for
even $n$ whereas for odd $n$ it is left unconstrained.
Thus finally we see that CPT invariance allows for a total of
\begin{itemize}
\item $\frac{n}{2}$ constant ($\tilde{C}_m$ with $m$ odd) for even $n$.
\item $\frac{n+1}{2}$ constants ($\tilde {C}_m$ with $m$ even and $\tilde {C}_n$) for odd $n$.
\end{itemize}
In particular the coefficient $\tilde C_0$ always vanishes and thus, for a CPT invariant theory, we never get
the gauge-non-invariant contribution to the local entropy current.
\begin{table}
\centering
\begin{tabular}[h]{|c|c|c|c|c|}
\hline
fields & C & P & T & CPT \\
\hline
$\sigma$ & + & + & + & + \\
\hline
$a_i$ & + & - & - & + \\
\hline
$g_{ij}$ & + & + & + & + \\
\hline
$A_0$ & - & + & + & - \\
\hline
$A_i$ & - & - & - & - \\
\hline
\end{tabular}
\caption{\label{cpttab} Action of CPT on various fields}
\end{table}
\section{Conclusion}\label{sec:conclusion}
In this paper we have shown that the results of \cite{Kharzeev:2011ds, Loganayagam:2011mu},
based on entropy arguments, can be re-derived within a more field-theory-friendly
partition function technique\cite{Banerjee:2012iz,Jain:2012rh,Jensen:2012jh,Jensen:2012jy}.
This has led us to a deeper understanding linking the local description of anomalous
transport in terms of a Gibbs current \cite{Loganayagam:2011mu,Loganayagam:2012pz} to
the global description in terms of partition functions.
An especially satisfying result is that the polynomial structure of anomalous transport
coefficients discovered in \cite{Loganayagam:2011mu} is reproduced at the level of
partition functions. There it was shown that the whole set of anomalous transport
coefficients are essentially governed by a single homogeneous polynomial
$\mathfrak{F}^\omega_{anom}[T,\mu]$ of temperature and chemical potentials.
The authors of \cite{Loganayagam:2012pz} noticed that in a free theory of
chiral fermions this polynomial structure is directly linked to the corresponding
anomaly polynomial of chiral fermions via a replacement rule
\begin{equation}
\begin{split}
\mathfrak{F}_{anom}^\omega[T,\mu] = \mathcal{P}_{anom} \brk{ \mathcal{F} \mapsto \mu, p_1(\mathfrak{R}) \mapsto - T^2 , p_{k>1}(\mathfrak{R}) \mapsto 0 }
\end{split}
\end{equation}
This result could be generalised for an arbitrary free theory with chiral
fermions and chiral p-form fields using sphere partition
function techniques which link this polynomial
to a specific thermal observable\cite{futureLoga}.
Various other known results (for example in AdS/CFT)
support the conjecture that this rule is probably true in all
theories with some mild assumptions. While we
have succeeded in reproducing the polynomial structure we have not tried
in this paper to check the above conjecture - this necessarily involves a
similar analysis keeping track of the effect of gravitational anomalies which
we have ignored in our work. It would be interesting to extend our analysis
to theories with gravitational anomalies\footnote{As we were finalising this manuscript, a
paper\cite{Valle:2012em} dealing with $1+1$d gravitational anomalies appeared in arXiv.
We thank Amos Yarom for various discussions regarding this topic.}.
We have derived in this paper a particular contribution to the equilibrium partition function
that is linked to the underlying anomalies of the theory. A direct test of this result would
be to do a direct holographic computation of the same quantity in AdS/CFT to obtain these
contributions. Since the CFT anomalies are linked to the Chern-Simons terms in the bulk
the holographic test would be a computation of a generalised Wald entropy for a black hole
solution of a gravity theory with Chern-Simons terms. The usual Wald entropy gets modified
in the presence of such Chern-Simons terms\cite{Tachikawa:2006sz,Bonora:2011gz} which are
usually a part of higher derivative corrections to gravity. We hope that reproducing
the results of this paper would give us a test of generalised Wald formalism
for such higher derivative corrections.
We have directly linked the description in terms of a Gibbs current\cite{Loganayagam:2011mu,Loganayagam:2012pz}
satisfying a kind of adiabaticity equation to the global description in terms of partition functions.
Further we have noticed in \eqref{eq:GibbsChi} that at least in the case of anomalous transport this Gibbs current
is closely linked to what has been called `the non-canonical part of the entropy current'
in various entropy arguments\cite{Bhattacharyya:2012ex}. It would be interesting to see whether
this construction can be generalised beyond the anomalous transport coefficients
to other partition function computations which appear in \cite{Banerjee:2012iz,Jensen:2012jh}.
This would give us a more local interpretation of the various terms appearing in the
partition function linking them to a specific Gibbs free energy transport process. Hence with such
a result one could directly identify the coefficients appearing in the partition function
as the transport coefficients of the Gibbs current.
Another interesting observation of \cite{Loganayagam:2011mu} apart from the polynomial structure
is that the anomalous transport satisfies an interesting reciprocity type relation \eqref{eq:reciprocity}
- the susceptibility describing the change in the anomalous charge current
with a small change in vorticity is equal to the susceptibility
describing the change in the anomalous energy current with a small change
in magnetic field. While we see that the results of our paper are consistent
with this observation made in \cite{Loganayagam:2011mu}, we have not succeeded
in deriving this relation directly from the partition function. It would be
interesting to derive such a relation from the partition function hence
clarifying how such a relation arises in a microscopic description.
Finally as we have emphasised in the introductions one would hope that
the results of our paper serve as a starting point for generalising
the analysis of anomalies to non-equilibrium phenomena. Can one
write down a Schwinger-Keldysh functional which transforms appropriately -
does this provide new constraints on the dissipative transport coefficients ?
We leave such questions to future work.
\subsection*{Acknowledgements}
We would like to thank Sayantani Bhattacharyya for collaboration in
the initial stages of this project.
It is a pleasure to thank Jyotirmoy Bhattacharya, Dileep Jatkar,
Shiraz Minwalla, Mukund Rangamani, Piotr Surowka, Amos Yarom and
Cumrun Vafa for various useful discussions on ideas presented in this
paper. Research of NB is supported by NWO Veni grant, The Netherlands.
RL would like to thank \textbf{ICTS discussion meeting on
current topics of research in string theory}
at the International Centre for Theoretical Sciences (TIFR), IISc Bangalore, for
their hospitality while this work was being completed. RL is supported by the Harvard Society of Fellows
through a junior fellowship. Finally, RL would like to thank various colleagues at the
Harvard society for interesting discussions. Finally, we would like to thank people of India for their generous support
to research in science.
\newpage
|
{
"timestamp": "2012-06-29T02:00:19",
"yymm": "1206",
"arxiv_id": "1206.6499",
"language": "en",
"url": "https://arxiv.org/abs/1206.6499"
}
|
\section{Introduction}\label{intro}
In Toral's \cite{T01} \textit{cooperative Parrondo games}, there are $N\ge3$ players labeled from 1 to $N$ and arranged in a circle in clockwise order. At each turn, one player is chosen at random to play. Call him player $i$. He plays either game $A$ or game $B$. In game $A$ he tosses a fair coin. In game $B$ he tosses a $p_0$-coin (i.e., $p_0$ is the probability of heads) if his neighbors $i-1$ and $i+1$ are both losers, a $p_1$-coin if $i-1$ is a loser and $i+1$ is a winner, a $p_2$-coin if $i-1$ is a winner and $i+1$ is a loser, and a $p_3$-coin if $i-1$ and $i+1$ are both winners. (Because of the circular arrangement, player 0 is player $N$ and player $N+1$ is player 1.) A player's status as winner or loser depends on the result of his most recent game. The player of either game wins one unit with heads and loses one unit with tails. Under these assumptions, the model has an integer parameter $N\ge3$ and four probability parameters $p_0,p_1,p_2,p_3\in[0,1]$. Game $A$ is fair, so the games are said to exhibit the \textit{Parrondo effect} if game $B$ is losing or fair and the random mixture $C:=\gamma A+(1-\gamma)B$ (i.e., toss a $\gamma$-coin, playing game $A$ if heads, game $B$ if tails) or the nonrandom periodic pattern $C:=A^r B^s$ is winning. Toral used simulation to find a case (namely, $N=50$, $100$, or $200$, $p_0=1$, $p_1=p_2=4/25$, and $p_3=7/10$) in which the Parrondo effect appears when $\gamma=1/2$ or $r=s=2$, thereby providing a new example of \textit{Parrondo's paradox} (Harmer and Abbott \cite{HA02}, Abbott \cite{A10}).
Ethier and Lee \cite{EL12c} studied the random mixture case with $\gamma=1/2$. In this paper we focus on the nonrandom pattern case. Denoting the mean profits per turn to the ensemble of $N$ players by $\mu_{(\gamma,1-\gamma)}^N$ and $\mu_{[r,s]}^N$ in the two cases of game $C$ and by $\mu_B^N$ in the case of game $B$, it was shown in \cite{EL12c} that $\mu_B^N$ converges under certain conditions on the parameters, and that $\mu_{(1/2,1/2)}^N$ converges essentially always. The limits can be described in terms of a parameterized spin system on the one-dimensional integer lattice. Of course one can get similar results for $\mu_{(\gamma,1-\gamma)}^N$ for $0<\gamma<1$. Here we show that $\mu_{[r,s]}^N$ converges, under certain conditions, to the same limit that $\mu_{(\gamma,1-\gamma)}^N$ converges to, where $\gamma:=r/(r+s)$. A similar phenomenon is present in a nonspatial $N$-player model of Toral \cite{T02}, as shown in \cite{EL12a}, although in that setting, $\mu_{(\gamma,1-\gamma)}^N$ does not depend on $N$.
Numerical studies \cite{EL12b,EL12d} suggest that $\mu_{[r,s]}^N$ converges much more slowly than $\mu_{(\gamma,1-\gamma)}^N$. For example, let us consider the special case $p_0=1/10$, $p_1=p_2=3/5$, and $p_3=3/4$. By $N=18$ (the largest $N$ for which computations have been done in the nonrandom-pattern case), $\mu_{[1,1]}^N$ matches its limiting value to only two significant digits. On the other hand, by $N=19$ (the largest $N$ for which computations have been done in the random-mixture case), $\mu_B^N$ has stabilized to four significant digits and $\mu_{(1/2,1/2)}^N$ has stabilized to 11 significant digits.
As in \cite{EL12c}, we consider separately a particular choice of the probability parameters, namely $p_0=1$, $p_1=p_2\in(1/2,1)$, and $p_3=0$. We show that the Parrondo effect (i.e., $\mu_B^N\le0$ and $\mu_{[r,s]}^N>0$) is present if and only if $N$ is even, at least when $s=1$.
Section~\ref{Markov} describes the $N$-player model and the associated discrete-time Markov chain. Section~\ref{SLLN} establishes a strong law of large numbers (SLLN) for the sequence of profits to the ensemble of $N$ players playing the nonrandom pattern $A^rB^s$, giving several formulas for $\mu_{[r,s]}^N$. Section~\ref{p0=1,p3=0} treats the special case in which we can confirm the Parrondo effect for all even $N\ge4$. Section~\ref{spin} introduces the related spin system and reviews its basic properties. Finally, Section~\ref{limit} establishes our main result, the convergence of $\mu_{[r,s]}^N$ as $N\to\infty$ to a limit that can be expressed in terms of the spin system.
\section{The discrete-time Markov chain}\label{Markov}
Let us define the Markov chain, introduced by Mihailovi\'c and Rajkovi\'c \cite{MR03}, that keeps track of the status (loser or winner, 0 or 1) of each of the $N$ players playing game $B$. It depends on an integer parameter $N\ge3$ and four probability parameters $p_0,p_1,p_2,p_3\in[0,1]$. Its state space is the product space
$$
\Sigma:=\{\bm x=(x_1,x_2,\ldots,x_N): x_i\in\{0,1\}{\rm\ for\ }i=1,\ldots,N\}=\{0,1\}^N
$$
with $2^N$ states. Let $m_i(\bm x):=2x_{i-1}+x_{i+1}$, or, in other words, $m_i(\bm x)$ is the integer (0, 1, 2, or 3) whose binary representation is $(x_{i-1}\,x_{i+1})_2$; of course, $x_0:=x_N$ and $x_{N+1}:=x_1$. Also, let $\bm x^i$ be the element of $\Sigma$ equal to $\bm x$ except at the $i$th component; for example, $\bm x^1:=(1-x_1,x_2,x_3,\ldots,x_N)$. The one-step transition matrix $\bm P_B$ for this Markov chain has the form
\begin{equation*}
P_B(\bm x,\bm x^i):=\begin{cases}N^{-1}p_{m_i(\bm x)}&\text{if $x_i=0$,}\\N^{-1}q_{m_i(\bm x)}&\text{if $x_i=1$,}\end{cases}\qquad i=1,\ldots,N,\;\bm x\in\Sigma,
\end{equation*}
\begin{equation*}
P_B(\bm x,\bm x):=N^{-1}\bigg(\sum_{i:x_i=0}q_{m_i(\bm x)}+\sum_{i:x_i=1}p_{m_i(\bm x)}\bigg),\qquad \bm x\in\Sigma,
\end{equation*}
where $q_m:=1-p_m$ for $m=0,1,2,3$ and empty sums are 0, and $P_B(\bm x,\bm y)=0$ otherwise.
Necessary and sufficient conditions on $N\ge3$ and $p_0,p_1,p_2,p_3\in[0,1]$ for the ergodicity of the Markov chain were given in \cite{EL12c}. (A Markov chain is \textit{ergodic} if there is a unique stationary distribution and the distribution at time $n$ converges to it as $n\to\infty$, regardless of the initial distribution.)
If $p_0=p_1=p_2=p_3=1/2$, then we denote $\bm P_B$ by $\bm P_A$. Our main concern is with the nonrandom pattern of games $A^rB^s$ for positive integers $r$ and $s$, in which case the relevant Markov chain in $\Sigma$ has one-step transition matrix $\bm P_A^r\bm P_B^s$. Our first result shows that this chain is ergodic for all choices of the parameters. Actually, we need a slightly stronger result.
\begin{lemma}\label{ergodic}
Let $N\ge3$ and $p_0,p_1,p_2,p_3\in[0,1]$. Fix $r,s\ge1$ and put $\bm P_1:={\bm P}_A^{r-1}{\bm P}_B^s\bm P_A$, \dots, $\bm P_r:={\bm P}_B^s{\bm P}_A^r$, $\bm P_{r+1}:={\bm P}_B^{s-1}{\bm P}_A^r\bm P_B$, \dots, and $\bm P_{r+s}:={\bm P}_A^r{\bm P}_B^s$. (These are the $r+s$ cyclic permutations of ${\bm P}_A^r{\bm P}_B^s$.)
\emph{($i$)} The Markov chain in $\Sigma$ with one-step transition matrix $\bm P_1$, $\bm P_2$, \dots, or $\bm P_r$ is irreducible and aperiodic. In particular, it is ergodic.
\emph{($ii$)} The Markov chain in $\Sigma$ with one-step transition matrix $\bm P_{r+1}$, $\bm P_{r+2}$, \dots, or $\bm P_{r+s}$ has the following behavior. There exists a (possibly empty) proper subset $T\subset\Sigma$ such that $T$ is transient and $\Sigma-T$ is closed, irreducible, and aperiodic. In particular, the Markov chain is ergodic. In fact, the set $T$, which does not depend on $r$ or $s$, can be specified as follows.
\emph{(a)} If $p_0,p_3\in(0,1)$, then $T=\varnothing$.
\emph{(b)} If $p_0=1$ and $p_3\in(0,1]$, then $T=\{\bm0\}$, with one exception. If $N$ is divisible by $3$ and $(p_0,p_1,p_2,p_3)=(1,0,0,1)$, then $T=\{\bm0,011\cdots011,101\cdots101,\linebreak110\cdots110\}$.
\emph{(c)} If $p_0=0$ and $p_3\in(0,1)$, then $T=\varnothing$, with one exception. If $N$ is divisible by $3$ and $p_1=p_2=1$, then $T=\{001\cdots001,010\cdots010,100\cdots100\}$.
\emph{(d)} If $p_0\in[0,1)$ and $p_3=0$, then $T=\{\bm1\}$, with one exception. If $N$ is divisible by $3$ and $(p_0,p_1,p_2,p_3)=(0,1,1,0)$, then $T=\{001\cdots001,010\cdots010,\linebreak100\cdots100,\bm1\}$.
\emph{(e)} If $p_0\in(0,1)$ and $p_3=1$, then $T=\varnothing$, with one exception. If $N$ is divisible by $3$ and $p_1=p_2=0$, then $T=\{011\cdots011,101\cdots101,110\cdots110\}$.
\emph{(f)} If $p_0=1$ and $p_3=0$, then $T=\{\bm0,\bm1\}$.
\emph{(g)} Let $p_0=0$ and $p_3=1$. If $N$ is odd, then $T=\varnothing$, and if $N$ is even, then $T=\{01\cdots01,10\cdots10\}$, with two exceptions. If $p_1=p_2=0$, then $T$ comprises all states in which $0$s occur as singletons and $1$s occur as singletons or pairs. If $p_1=p_2=1$, then $T$ comprises all states in which $1$s occur as singletons and $0$s occur as singletons or pairs.
\end{lemma}
\begin{proof}
We claim that it is enough for part ($ii$) to show that
\begin{eqnarray}\label{aper}
P_B(\bm x,\bm x)&>&0,\qquad \bm x\in\Sigma-T,\\ \label{closed}
P_B(\bm x,\bm y)&=&0,\qquad \bm x\in\Sigma,\;\bm y\in T.
\end{eqnarray}
We treat the case of $\bm P_{r+s}$, the cases of $\bm P_{r+1},\ldots,\bm P_{r+s-1}$ being similar.
To see this, suppose $\bm x,\bm y\in\Sigma-T$. Then there exist $\bm x=\bm x_0,\bm x_1,\ldots,\bm x_{n-1},\bm x_n=\bm y$ such that $\bm x_i\in\Sigma$ and $P_A(\bm x_{i-1},\bm x_i)>0$ for $i=1,2,\ldots,n$ because $\bm P_A$ is irreducible. We claim that $\bm x_0,\bm x_1,\ldots,\bm x_n$ can be assumed to belong to $\Sigma-T$ (see the paragraph following the next one). But then
\begin{equation*}
P_{r+s}(\bm x_{i-1},\bm x_i)\ge P_A(\bm x_{i-1},\bm x_i)(1/2)^{r-1}[P_B(\bm x_i,\bm x_i)]^s>0
\end{equation*}
by (\ref{aper}) (and since $P_A(\bm x_i,\bm x_i)=1/2$), so $\Sigma-T$ is irreducible for $\bm P_{r+s}$. Similarly, if $\bm x\in\Sigma-T$,
\begin{equation*}
P_{r+s}(\bm x,\bm x)\ge(1/2)^r[P_B(\bm x,\bm x)]^s>0
\end{equation*}
by (\ref{aper}), so $\Sigma-T$ is also aperiodic for $\bm P_{r+s}$. By (\ref{closed}) and the fact that the final factor in $\bm P_{r+s}$ is $\bm P_B$, $\Sigma-T$ is closed and states in $T$ are transient.
We claim that (\ref{aper}) and (\ref{closed}) are also sufficient for part ($i$). We treat the case of $\bm P_r$, the cases of $\bm P_1,\ldots,\bm P_{r-1}$ being similar. To see this, suppose $\bm x,\bm y\in\Sigma$. By (\ref{closed}), there exists $\bm x'\in\Sigma-T$ such that $P_B(\bm x,\bm x')>0$. Hence
\begin{equation*}
P_r(\bm x,\bm x')\ge P_B(\bm x,\bm x')[P_B(\bm x',\bm x')]^{s-1}(1/2)^r>0.
\end{equation*}
Also, because of the simple form that $T$ has, there exists $\bm y'\in\Sigma-T$ such that $P_A(\bm y',\bm y)>0$. Finally, as in the preceding paragraph, there exist $\bm x'=\bm x_0,\bm x_1,\ldots,\bm x_{n-1}=\bm y',\bm x_n=\bm y$ such that $\bm x_i\in\Sigma-T$ and $P_A(\bm x_{i-1},\bm x_i)>0$ for $i=1,2,\ldots,n-1$. We then get
\begin{equation*}
P_r(\bm x_{i-1},\bm x_i)\ge[P_B(\bm x_{i-1},\bm x_{i-1})]^s(1/2)^{r-1}P_A(\bm x_{i-1},\bm x_i)>0
\end{equation*}
for $i=1,2,\ldots,n$, so $\bm P_r$ is irreducible. Finally, for the aperiodicity of $\bm P_r$, we observe that, if $\bm x\in\Sigma-T$,
\begin{equation*}
P_r(\bm x,\bm x)\ge[P_B(\bm x,\bm x)]^s(1/2)^r>0,
\end{equation*}
and this suffices by irreducibility.
There is a missing step in the first paragraph. Specifically, we must show that, given $\bm x,\bm y\in\Sigma-T$, there exist $\bm x=\bm x_0,\bm x_1,\ldots,\bm x_{n-1},\bm x_n=\bm y$ such that $\bm x_i\in\Sigma-T$ and $P_A(\bm x_{i-1},\bm x_i)>0$ for $i=1,2,\ldots,n$. In fact, we can choose $n$ equal to the Hamming distance between $\bm x$ and $\bm y$, $d(\bm x,\bm y):=\sum_{i=1}^N|x_i-y_i|$. The justification for this requires a case-by-case analysis, but the idea is much the same in each case. Suppose $T=\{\bm0\}$. Then $\bm y$ must have $y_k=1$ for some $k\in\{1,2,\ldots,N\}$. If $x_k=1$, then every $\bm x_i$ will have $k$th component 1, hence will not be in $T$. If $x_k=0$, then let $\bm x_1=\bm x^k$. Again, $\bm x_i$ will have $k$th component equal to 1 for $i=1,\ldots,n$, hence will not be in $T$ along with $\bm x_0$ by assumption.
A similar argument works for $T=\{\bm1\}$, so suppose $T=\{\bm0,\bm1\}$. Let $k\in\{1,2,\ldots,N\}$ be such that $(y_k,y_{k+1})=(0,1)$. If $(x_k,x_{k+1})$ equals $(0,1)$, we are finished; if it equals $(0,0)$ or $(1,1)$, then $\bm x_1$ is $\bm x^k$ or $\bm x^{k+1}$ as needed. Finally, if $(x_k,x_{k+1})=(1,0)$, then define $\bm x_1$ and $\bm x_2$ by flipping the bits at sites $k$ and $k+1$ in whichever order is necessary to avoid having $\bm x_1\in T$. It follows that $\bm x_0,\bm x_1,\ldots,\bm x_n\in\Sigma-T$.
Next, suppose $N$ is even and $T=\{01\cdots01,10\cdots10\}$. Given $\bm x,\bm y\in\Sigma-T$, $\bm y$ must have two consecutive 0s or two consecutive 1s; assume the former case, the latter case being symmetric. Then there exists $k\in\{1,2,\ldots,N\}$ such that $(y_k,y_{k+1})=(0,0)$. The argument is now completed as in the preceding paragraph.
There are six other cases that must be considered (the various exceptional cases in the lemma). By symmetry, it is enough to consider the case $T=\{001\cdots001,010\cdots010,100\cdots100\}$, the case where $T$ is the union of the latter set and $\{\bm1\}$, and the case where $T$ contains all vectors in which 1s occur only as singletons and 0s occur only as singletons or pairs. We treat the first case, the other two being similar. Given $\bm x,\bm y\in\Sigma-T$, $\bm y$ must have a segment of the form 000, 011, 101, 110, or 111. For example, in the first case, there is a $k\in\{1,2,\ldots,N\}$ such that $(y_k,y_{k+1},y_{k+2})=(0,0,0)$. In any case, if $\bm x$ differs from $\bm y$ at none of these three sites, we are finished. If it differs at one, we flip the bit at that site to determine $\bm x_1$. If it differs at two, we flip the bits at these two sites to determine $\bm x_1$ and $\bm x_2$, the order chosen so as to avoid having $\bm x_1\in T$. If it differs at all three sites, we flip the bit at one of the three sites to determine $\bm x_1$, the site chosen to avoid $\bm x_1\in T$. Then it differs at two of the sites, a case we have already treated. This completes the missing step.
It remains to show that (\ref{aper}) and (\ref{closed}) are satisfied by $\bm P_B$. We consider (\ref{aper}) first. By virtue of
\begin{equation*}
P_B(\bm x,\bm x):=N^{-1}\bigg(\sum_{i:x_i=0}q_{m_i(\bm x)}+\sum_{i:x_i=1}p_{m_i(\bm x)}\bigg),
\end{equation*}
property (\ref{aper}) holds if, for each $\bm x\in\Sigma-T$, at least one of the following holds: $p_0<1$ and $\bm x$ contains the segment $000$; $p_0>0$ and $\bm x$ contains the segment $010$; $p_1<1$ and $\bm x$ contains the segment $001$; $p_1>0$ and $\bm x$ contains the segment $011$; $p_2<1$ and $\bm x$ contains the segment $100$; $p_2>0$ and $\bm x$ contains the segment $110$; $p_3<1$ and $\bm x$ contains the segment $101$; $p_3>0$ and $\bm x$ contains the segment $111$.
(a) If $\bm x$ has three consecutive $0$s or three consecutive $1$s, then $p_0<1$ or $p_3>0$ suffice, so suppose not. Then $\bm x$ contains the pair $01$. If $01$ is part of $010$ or $101$, then $p_0>0$ or $p_3<1$ suffice, so we can assume that $01$ is part of $0011$, hence $100110$. This suffices if $p_2<1$, $p_1<1$, $p_1>0$, or $p_2>0$, and at least two of these inequalities must hold.
(b) If $p_3<1$, then the argument is similar to that of (a), except $\bm x=\bm0$ is excluded but we cannot rule out three consecutive $0$s. So $01$ is part of $0011$, hence part of $100\cdots00110$. The proof is otherwise unchanged.
If $p_3=1$, then $\bm x=\bm0$ is excluded but we cannot rule out three consecutive $0$s. If $\bm x$ has three consecutive $1$s, then $p_3>0$ suffices, so suppose not. Then $\bm x$ contains the pair $01$. If $01$ is part of $010$, then $p_0>0$ suffices. Hence we can assume it is part of $011$, therefore $0110$. We are finished if $p_1>0$ or $p_2>0$, so suppose $p_1=p_2=0$. If we exclude $\bm x$ of the form $011\cdots011$, $101\cdots101$, or $110\cdots110$, then we have ruled out all possibilities (singleton $1$s and three or more consecutive $1$s are excluded, and two or more consecutive $0$s can be excluded because $p_1<1$ and $p_2<1$).
(c) If $\bm x$ has three consecutive $0$s or three consecutive $1$s, then $p_0<1$ or $p_3>0$ suffice, so suppose not. Then $\bm x$ contains the pair $01$. If $01$ is part of $101$, then $p_3<1$ suffices, so suppose not. Then it is part of $001$, therefore $1001$. We are finished if $p_2<1$ or $p_1<1$, so suppose $p_1=p_2=1$. If we exclude $\bm x$ of the form $001\cdots001$, $010\cdots010$, or $100\cdots100$, we have ruled out all possibilities (singleton $0$s and three or more consecutive $0$s are excluded, and two or more consecutive $1$s can be excluded because $p_1>0$ and $p_2>0$).
(d) and (e) These cases are symmetric with (b) and (c).
(f) The argument is similar to that of (a), except $\bm x=\bm0$ and $\bm x=\bm1$ are excluded but we cannot rule out three consecutive $0$s or three consecutive $1$s. Then $\bm x$ contains the pair $01$. If $01$ is part of $010$ or $101$, then $p_0>0$ or $p_3<1$ suffice, so we can assume that $01$ is part of $0011$, hence $100\cdots0011\cdots10$. This is similar to case (a).
(g) If $\bm x$ has three consecutive $0$s or three consecutive $1$s, then $p_0<1$ or $p_3>0$ suffice. So suppose not. Then $\bm x$ contains the pair $01$. Suppose it is part of $001$ or $011$. In the first case it is part of $1001$. If this is part of $10011$, then it suffices that $p_2<1$, $p_1<1$, or $p_1>0$, at least one of which must hold. If this is part of $11001$, then it suffices that $p_2>0$, $p_2<1$, or $p_1<1$, at least one of which must hold. Therefore we can assume that $1001$ is part of $010010$. This suffices if $p_2<1$ or $p_1<1$, but if $p_1=p_2=1$, then we must rule out states in which $1$s occur as singletons and $0$s occur as singletons or pairs. If $\bm x$ is not of this form, then we can choose our initial $01$ in such a way that it is not embedded in $010010$. In the second case, in which $01$ is part of $011$, it is part of $0110$. If this is part of $01100$, then it suffices that $p_1>0$, $p_2>0$, or $p_2<1$, at least one of which must hold. If this is part of $00110$, then it suffices that $p_1<1$, $p_1>0$, or $p_2>0$, at least one of which must hold. Therefore we can assume that $0110$ is part of $101101$. This suffices if $p_1>0$ or $p_2>0$, but if $p_1=p_2=0$, then we must rule out states in which $0$s occur as singletons and $1$s occur as singletons or pairs. If $\bm x$ is not of this form, then we can choose our initial $01$ in such a way that it is not embedded in $101101$. Finally, the only other possibility is that $01$ is part of $1010$. Assuming $\bm x$ is not part of $01\cdots01$ or $10\cdots10$ with $N$ even, there must be a $01$ that is not embedded in $1010$.
This finally proves (\ref{aper}), so we turn to (\ref{closed}), which is equivalent to
\begin{equation*}
P_B(\bm y,\bm y)=0,\quad P_B(\bm y^i,\bm y)=0,\qquad \bm y\in T,\; i=1,2,\ldots,N.
\end{equation*}
If $\bm0\in T$, then $p_0=1$ and it suffices to note that $P_B(\bm0,\bm0)=0$ and $P_B(\bm0^i,\bm0)=0$. If $\bm1\in T$, then $p_3=0$ and it suffices to note that $P_B(\bm1,\bm1)=0$ and $P_B(\bm1^i,\bm1)=0$. If $N$ is even and $01\cdots01\in T$, then $p_0=0$ and $p_3=1$ and it suffices to note that $P_B(01\cdots01,01\cdots01)=0$ and $P_B(01\cdots01^i,01\cdots01)=0$. The same applies if $N$ is even and $10\cdots10\in T$. If $N$ is divisible by 3 and $001\cdots001\in T$, then $p_0=0$ and $p_1=p_2=1$ and it suffices to note that $P_B(001\cdots001,001\cdots001)=0$ and $P_B(001\cdots001^i,001\cdots001)=0$. This also applies to rotations of $001\cdots001$. If $N$ is divisible by 3 and $011\cdots011\in T$, then $p_1=p_2=0$ and $p_3=1$ and it suffices to note that $P_B(011\cdots011,011\cdots011)=0$ and $P_B(011\cdots011^i,011\cdots011)=0$. This also applies to rotations of $011\cdots011$. The only remaining cases are the exceptional cases of part (g). If $(p_0,p_1,p_2,p_3)=(0,0,0,1)$ and if $\bm x$ has only singleton 0s and singleton or paired 1s, then $P_B(\bm x,\bm x)=0$ and $P_B(\bm x^i,\bm x)=0$. If $(p_0,p_1,p_2,p_3)=(0,1,1,1)$ and if $\bm x$ has only singleton or paired 0s and singleton 1s, then $P_B(\bm x,\bm x)=0$ and $P_B(\bm x^i,\bm x)=0$.
\end{proof}
\begin{lemma}\label{invariance}
Let $G$ be a subgroup of the symmetric group $S_N$. Let $\bm P$ be the one-step transition matrix for a Markov chain in $\Sigma$ having a unique stationary distribution $\bm\pi$. For $\bm x=(x_1,\ldots,x_N)\in\Sigma$ and $\sigma\in G$, write $\bm x_\sigma:=(x_{\sigma(1)},\ldots,x_{\sigma(N)})$, and assume that
\begin{equation}\label{G-invariance}
P(\bm x_\sigma,\bm y_\sigma)=P(\bm x,\bm y),\qquad \sigma\in G,\; \bm x,\bm y\in\Sigma.
\end{equation}
Then $\pi(\bm x_\sigma)=\pi(\bm x)$ for all $\sigma\in G$ and $\bm x\in\Sigma$.
\end{lemma}
This lemma is from \cite{EL12b}, where it was shown to apply to $\bm P_B$ when $G$ is the subgroup of cyclic permutations (or rotations) of $(1,2,\ldots,N)$ and, if $p_1=p_2$, when $G$ is the subgroup generated by the cyclic permutations and the order-reversing permutation (rotations and/or reflections) of $(1,2,\ldots,N)$, the dihedral group of order $2N$. It therefore also applies to $\bm P_A$ under the same conditions and hence, for fixed $r,s\ge1$, to each of the one-step transition matrices $\bm P_1,\bm P_2,\ldots,\bm P_{r+s}$ of Lemma \ref{ergodic} under the same conditions. For this we need a simple observation. Let us define the stochastic matrix $\bm P$ with rows and columns indexed by $\Sigma$ to be \textit{$G$-invariant} if (\ref{G-invariance}) holds. We notice that the class of $G$-invariant stochastic matrices is closed under matrix multiplication, for if $\bm P_1$ and $\bm P_2$ are $G$-invariant, then
\begin{eqnarray*}
[\bm P_1\bm P_2](\bm x_\sigma,\bm y_\sigma)&=&\sum_{\bm z\in\Sigma}P_1(\bm x_\sigma,\bm z)P_2(\bm z,\bm y_\sigma)=\sum_{\bm z\in\Sigma}P_1(\bm x_\sigma,\bm z_\sigma)P_2(\bm z_\sigma,\bm y_\sigma)\\
&=&\sum_{\bm z\in \Sigma}P_1(\bm x,\bm z)P_2(\bm z,\bm y)=[\bm P_1 \bm P_2](\bm x,\bm y)
\end{eqnarray*}
for all $\sigma\in G$ and $\bm x,\bm y\in\Sigma$.
For example, with $\bm\pi$ being the unique stationary distribution of $\bm P_A^r\bm P_B^s$, the lemma applies to $\bm\pi\bm P_A^r\bm P_B^v$, which is the unique stationary distribution of $\bm P_{r+v}$, for $v=0,1,\ldots,s-1$. We conclude that, if $p_1=p_2$, then the $1,3$ two-dimensional marginals of $\bm\pi\bm P_A^r\bm P_B^v$ satisfy
\begin{equation}\label{1,3 symm}
[\bm\pi\bm P_A^r\bm P_B^v]_{1,3}(0,1)=[\bm\pi\bm P_A^r\bm P_B^v]_{1,3}(1,0),\qquad v=0,1,\ldots,s-1.
\end{equation}
\section{SLLN}\label{SLLN}
We will need the following version of the strong law of large numbers from \cite{EL09}.
\begin{theorem}\label{SLLN-EL09}
Let $\bm P_A$ and $\bm P_B$ be one-step transition matrices for Markov chains in a finite state space $\Sigma_0$. Fix $r,s\ge1$. Assume that $\bm P:=\bm P_A^r\bm P_B^s$, as well as all cyclic permutations of $\bm P_A^r\bm P_B^s$, are ergodic, and let the row vector $\bm\pi$ be the unique stationary distribution of $\bm P$. Given a real-valued function $w$ on $\Sigma_0\times\Sigma_0$, define the payoff matrix $\bm W:=(w(i,j))_{i,j\in\Sigma_0}$. Define $\dot{\bm P}_A:=\bm P_A\circ\bm W$ and $\dot{\bm P}_B:=\bm P_B\circ\bm W$, where $\circ$ denotes the Hadamard (entrywise) product, and put
$$
\mu_{[r,s]}:={1\over r+s}\bigg[\sum_{u=0}^{r-1}\bm\pi\bm P_A^u\dot{\bm P}_A\bm1+\sum_{v=0}^{s-1}\bm\pi\bm P_A^r\bm P_B^v\dot{\bm P}_B\bm1\bigg],
$$
where $\bm1$ denotes a column vector of $1$s with entries indexed by $\Sigma_0$. Let $\{X_n\}_{n\ge0}$ be a nonhomogeneous Markov chain in $\Sigma_0$ with one-step transition matrices $\bm P_A,\ldots,\bm P_A$ $(r\text{ times})$, $\bm P_B,\ldots,\bm P_B$ $(s\text{ times})$, $\bm P_A,\ldots,\bm P_A$ $(r\text{ times})$, $\bm P_B,\ldots,\bm P_B$ $(s\text{ times})$, and so on, and let the initial distribution be arbitrary. For each $n\ge1$, define $\xi_n:=w(X_{n-1},X_n)$ and $S_n:=\xi_1+\cdots+\xi_n$. Then $\lim_{n\to\infty}n^{-1}S_n=\mu_{[r,s]}$ {\rm a.s.}
\end{theorem}
\begin{remark}
Under an additional assumption there is also a central limit theorem.
\end{remark}
Theorem \ref{SLLN-EL09} applies not to $\bm P_A$ and $\bm P_B$ of Section~\ref{Markov} but to analogous one-step transition matrices on a slightly more informative state space. The new state space is
$\Sigma^*:=\Sigma\times\{1,2,\ldots,N\}$ and the process is in state $(\bm x,i)$ if $\bm x$ describes the status of each player and $i$ is the next player to play. Given $N\ge3$ and $p_0,p_1,p_2,p_3\in[0,1]$, we define $\bm P_B^*$ by
\begin{equation*}
P_B^*((\bm x,i),(\bm x^i,j)):=\begin{cases}N^{-1}p_{m_i(\bm x)}&\text{if $x_i=0$,}\\N^{-1}q_{m_i(\bm x)}&\text{if $x_i=1$,}\end{cases}
\end{equation*}
\begin{equation*}
P_B^*((\bm x,i),(\bm x,j)):=\begin{cases}N^{-1}q_{m_i(\bm x)}&\text{if $x_i=0$,}\\N^{-1}p_{m_i(\bm x)}&\text{if $x_i=1$,}\end{cases}
\end{equation*}
for all $(\bm x,i)\in\Sigma^*$ and $j=1,2,\ldots,N$, where $q_m:=1-p_m$ for $m=0,1,2,3$, and $P_B^*((\bm x,i),(\bm y,j))=0$ otherwise.
We further define $\bm P_A^*$ to be $\bm P_B^*$ with $p_0=p_1=p_2=p_3=1/2$.
\begin{lemma}
Let $N\ge3$ and $p_0,p_1,p_2,p_3\in[0,1]$. Fix $r,s\ge1$ and put $\bm P_1^*:=({\bm P}_A^*)^{r-1}({\bm P}_B^*)^s\bm P_A^*$, \dots, $\bm P_r^*:=({\bm P}_B^*)^s({\bm P}_A^*)^r$, $\bm P_{r+1}^*:=({\bm P}_B^*)^{s-1}({\bm P}_A^*)^r\bm P_B^*$, \dots, and $\bm P_{r+s}^*:=({\bm P}_A^*)^r({\bm P}_B^*)^s$. (These are the $r+s$ cyclic permutations of $({\bm P}_A^*)^r({\bm P}_B^*)^s$.)
\emph{($i$)} The Markov chain in $\Sigma^*$ with one-step transition matrix $\bm P_1^*$, $\bm P_2^*$, \dots, or $\bm P_r^*$ is irreducible and aperiodic. In particular, it is ergodic.
\emph{($ii$)} The Markov chain in $\Sigma^*$ with one-step transition matrix $\bm P_{r+1}^*$, $\bm P_{r+2}^*$, \dots, or $\bm P_{r+s}^*$ has the following behavior. There exists a (possibly empty) proper subset $T\subset\Sigma$ such that $T\times\{1,2,\ldots,N\}$ is transient and $(\Sigma-T)\times\{1,2,\ldots,N\}$ is closed, irreducible, and aperiodic. In particular, the Markov chain is ergodic. In fact, the set $T$, which does not depend on $r$ or $s$, is as in Lemma \ref{ergodic}.
Let $\bm\pi^*$ denote the unique stationary distribution for $({\bm P}_A^*)^r({\bm P}_B^*)^s$, and let $\bm\pi$ denote the unique stationary distribution for $\bm P_A^r\bm P_B^s$. Then
\begin{equation}\label{indep}
\bm\pi^*(\bm P_A^*)^r(\bm P_B^*)^v=\bm\pi\bm P_A^r\bm P_B^v\times{\rm uniform}\{1,2,\ldots,N\}
\end{equation}
for $v=0,1,\ldots,s-1$. Also, $\bm\pi^*=\bm\pi\times{\rm uniform}\{1,2,\ldots,N\}$.
\end{lemma}
\begin{proof}
Let $\bm\pi^*$ be stationary for $(\bm P_A^*)^r(\bm P_B^*)^s$. We will show that it has the form stated in the lemma. Let $\bm X^*(0),\bm X^*(1),\ldots$ be a nonhomogeneous Markov chain in $\Sigma^*$ with transition matrices $\bm P_A^*,\ldots,\bm P_A^*$ ($r$ times), $\bm P_B^*,\ldots,\bm P_B^*$ ($s$ times), $\bm P_A^*,\ldots,\bm P_A^*$ ($r$ times), $\bm P_B^*,\ldots,\bm P_B^*$ ($s$ times), and so on, and initial distribution $\bm\pi^*$. Then $\bm X^*(r+s)$ has distribution $\bm\pi^*(\bm P_A^*)^r(\bm P_B^*)^s=\bm\pi^*$. On the other hand, writing $\bm X^*(n)=:(\bm X(n),I(n))$ for each $n\ge0$, we claim that, for each $n\ge1$, $I(n)$ is independent of $\bm X(n)$ and is uniform$\{1,2,\ldots,N\}$, conditionally on $(\bm X(n-1),I(n-1))$, hence also unconditionally. This follows from $P_B^*((\bm x,i),(\bm x^i,j))=N^{-1}c_i(\bm x)$ and $P_B^*((\bm x,i),(\bm x,j))=N^{-1}[1-c_i(\bm x)]$, where $c_i(\bm x):=p_{m_i(\bm x)}$ if $x_i=0$ and $c_i(\bm x):=q_{m_i(\bm x)}$ if $x_i=1$ (and similarly for $\bm P_A^*$ but with $p_0=p_1=p_2=p_3=1/2$). Since $\bm X^*(0)$ has the same distribution as $\bm X^*(r+s)$, we find that $I(n)$ is independent of $\bm X(n)$ and is uniform$\{1,2,\ldots,N\}$ for each $n\ge0$ (not just $n\ge1$). We next claim that $\bm X(0),\bm X(1),\ldots$ is a nonhomogeneous Markov chain in $\Sigma$ with transition matrices $\bm P_A,\ldots,\bm P_A$ ($r$ times), $\bm P_B,\ldots,\bm P_B$ ($s$ times), $\bm P_A,\ldots,\bm P_A$ ($r$ times), $\bm P_B,\ldots,\bm P_B$ ($s$ times), and so on, and initial distribution $\bm\pi$, the $\bm x$-marginal of $\bm\pi^*$. The Markov property is essentially a consequence of identities such as
\begin{eqnarray*}
&&\P(\bm X(r+s)=\bm x^i\mid\bm X(r+s-1)=\bm x)\\
&&\quad{}={\P(\bm X(r+s)=\bm x^i,\bm X(r+s-1)=\bm x)\over\P(\bm X(r+s-1)=\bm x)}\\
&&\quad{}={\P((\bm X(r+s),I(r+s))=(\bm x^i,\cdot),(\bm X(r+s-1),I(r+s-1))=(\bm x,i))\over\P((\bm X(r+s-1),I(r+s-1))=(\bm x,\cdot))}\\
&&\quad{}={\P((\bm X(r+s),I(r+s))=(\bm x^i,j),(\bm X(r+s-1),I(r+s-1))=(\bm x,i))\over\P((\bm X(r+s-1),I(r+s-1))=(\bm x,i))}\\
&&\quad{}=\P((\bm X(r+s),I(r+s))=(\bm x^i,j)\mid(\bm X(r+s-1),I(r+s-1))=(\bm x,i))\\
&&\quad{}=P_B^*((\bm x,i),(\bm x^i,j))\\
&&\quad{}=P_B(\bm x,\bm x^i),
\end{eqnarray*}
where, for example, $I(r+s)=\cdot$ means that the value of $I(r+s)$ is unspecified.
Since $\bm X(r+s)$ has distribution $\bm\pi\bm P_A^r\bm P_B^s$ as well as distribution $\bm\pi$, we see that $\bm\pi$ is the unique stationary distribution for $\bm P_A^r\bm P_B^s$, as assumed in the statement of the lemma. Finally, $\bm\pi^*$, being the distribution of $\bm X^*(0)=(\bm X(0),I(0))$, must equal $\bm\pi\times{\rm uniform}\{1,2,\ldots,N\}$, and the last conclusion of the lemma follows. For $v=0,1,\ldots,s-1$, $(\bm X(r+v),I(r+v))$ has distribution $\bm\pi^*(\bm P_A^*)^r(\bm P_B^*)^v$ while $\bm X(r+v)$ has distribution $\bm\pi\bm P_A^r\bm P_B^v$, so by the independence result of the preceding paragraph, (\ref{indep}) follows.
It remains to prove the assertions about $\bm P_1^*,\ldots,\bm P_{r+s}^*$. Let us first treat the case of $\bm P_{r+s}^*$, the cases of $\bm P_{r+1}^*,\ldots,\bm P_{r+s-1}^*$ being similar. If $i\in\{1,2,\ldots,N\}$ and $(\bm y,j)\in T\times\{1,2,\ldots,N\}$, then $P_B^*((\bm y^i,i),(\bm y,j))=P_B(\bm y^i,\bm y)=0$ and $P_B^*((\bm y,i),(\bm y,j))\le P_B(\bm y,\bm y)=0$, so for all $(\bm x,i)\in\Sigma^*$ and $(\bm y,j)\in T\times\{1,2,\ldots,N\}$, $P_{r+s}^*((\bm x,i),(\bm y,j))=0$. This implies the transience of $T\times\linebreak\{1,2,\ldots,N\}$ and the closedness of $(\Sigma-T)\times\{1,2,\ldots,N\}$. As for the irreducibility of $(\Sigma-T)\times\{1,2,\ldots,N\}$, let $(\bm x,i)$ and $(\bm y,j)$ belong to this set. Let $\bm x_0:=\bm x$ and let $\bm x_1\in\Sigma-T$ be such that $P_{r+s}^*((\bm x_0,i),(\bm x_1,k))>0$ for all $k\in\{1,2,\ldots,N\}$. By the irreducibility of $\bm P_{r+s}$ on $\Sigma-T$ (Lemma \ref{ergodic}), there exist $\bm x_2,\ldots,\bm x_n=\bm y$ such that $\bm x_l\in\Sigma-T$, $\bm x_{l-1}\ne\bm x_l$, and $P_{r+s}(\bm x_{l-1},\bm x_l)>0$ for $l=2,\ldots,n$. Then there also exist $k_1,\ldots,k_{n-1}\in\{1,2,\ldots,N\}$ such that $P_{r+s}^*((\bm x_{l-1},k_{l-1}),(\bm x_l,k_l))>0$ for $l=2,\ldots,n$ with $k_n:=j$. With $k_0:=i$, this also holds for $l=1$, so we have $P_{r+s}^*((\bm x,i),(\bm y,j))>0$. For the aperiodicity of $(\Sigma-T)\times\{1,2,\ldots,N\}$, we need only show that, for some $(\bm x,i)$ belonging to this set,
$P_{r+s}^*((\bm x,i),(\bm x,i))>0$. Let $\bm x\in\Sigma-T$. Then $P_B(\bm x,\bm x)>0$, so there exists $i_0\in\{1,2,\ldots,N\}$ such that $P_B^*((\bm x,i_0),(\bm x,i_0))>0$. Hence
\begin{eqnarray*}
P_{r+s}^*((\bm x,i_0),(\bm x,i_0))&\ge&[P_A^*((\bm x,i_0),(\bm x,i_0))]^r[P_B^*((\bm x,i_0),(\bm x,i_0))]^s>0.
\end{eqnarray*}
Finally, we treat the case of $\bm P_r^*$, the cases of $\bm P_1^*,\ldots,\bm P_{r-1}^*$ being similar. For irreducibility, let $(\bm x,i)$ and $(\bm y,j)$ belong to $\Sigma^*$. Let $\bm x_0:=\bm x$ and let $\bm x_1$ be such that $P_r^*((\bm x_0,i),(\bm x_1,k))>0$ for all $k\in\{1,2,\ldots,N\}$. Then, by the irreducibility of $\bm P_r$ (Lemma \ref{ergodic}), there exist $\bm x_2,\bm x_3,\ldots,\bm x_n=\bm y$ such that $\bm x_{l-1}\ne\bm x_l$ and $P_r(\bm x_{l-1},\bm x_l)>0$ for $l=2,\ldots,n$. Then there also exist $k_1,\ldots,k_{n-1}\in\{1,2,\ldots,N\}$ such that $P_r^*((\bm x_{l-1},k_{l-1}),(\bm x_l,k_l))>0$ for $l=2,\ldots,n$ with $k_n:=j$. With $k_0:=i$, this also holds for $l=1$, so we have $P_r^*((\bm x,i),(\bm y,j))>0$. For aperiodicity, we need only show that, for some $(\bm x,i)\in\Sigma^*$,
$P_r^*((\bm x,i),(\bm x,i))>0$. Let $\bm x\in\Sigma-T$. Then the argument is as in the preceding paragraph.
\end{proof}
Notice also that the profit corresponding to each nonzero entry of $\bm P_B^*$ is equal to $\pm1$, so Theorem \ref{SLLN-EL09} applies and there are several formulas for the mean profit, as we now show.
\begin{theorem}\label{SLLN-thm}
Given $r,s\ge1$,
let $\bm\pi$ be the unique stationary distribution for the one-step transition matrix $\bm P_A^r\bm P_B^s$. Let $\{(\bm X(n), I(n))\}_{n\ge0}$ be a nonhomogeneous Markov chain in $\Sigma^*$ with one-step transition matrices $\bm P_A^*,\ldots,\bm P_A^*$ $(r\text{ times})$, $\bm P_B^*,\ldots,\bm P_B^*$ $(s\text{ times})$, $\bm P_A^*,\ldots,\bm P_A^*$ $(r\text{ times})$, $\bm P_B^*,\ldots,\bm P_B^*$ $(s\text{ times})$, and so on, and arbitrary initial distribution. Define
$$
\xi_n:=w((\bm X(n-1),I(n-1)),(\bm X(n),I(n))), \qquad n\ge1,
$$
where the payoff function $w$ is 1 for a win and $-1$ for a loss, determined by whether the corresponding entry of $\bm P_B^*$ is of the form $N^{-1}p_m$ or $N^{-1}q_m$. Let $S_n:=\xi_1+\cdots+\xi_n$ for each $n\ge1$. Then $n^{-1}S_n\to\mu_{[r,s]}^N$ {\rm a.s.} as $n\to\infty$, where the mean profit $\mu_{[r,s]}^N$ can be expressed in terms of $\bm\pi\bm P_A^r\bm P_B^v$ as
\begin{equation}\label{mu1}
\mu_{[r,s]}^N={1\over r+s}\sum_{v=0}^{s-1}\sum_{\bm x\in\Sigma}[\bm\pi\bm P_A^r\bm P_B^v](\bm x){1\over N}\sum_{i=1}^N [p_{m_i(\bm x)}-q_{m_i(\bm x)}],
\end{equation}
in terms of the $1,3$ two-dimensional marginals of $\bm\pi\bm P_A^r\bm P_B^v$ as
\begin{eqnarray}\label{mu2}
\mu_{[r,s]}^N&=&{1\over r+s}\sum_{v=0}^{s-1}\sum_{w=0}^1\sum_{z=0}^1[\bm\pi\bm P_A^r\bm P_B^v]_{1,3}(w,z)(p_{2w+z}-q_{2w+z}),
\end{eqnarray}
or in terms of the one-dimensional marginals of $\bm\pi\bm P_A^u$ and $\bm\pi\bm P_A^r\bm P_B^v$ as
\begin{eqnarray}\label{mu3}
\mu_{[r,s]}^N&=&{1\over r+s}\bigg[\sum_{u=0}^{r-1}\{[\bm\pi\bm P_A^u]_1(1)-[\bm\pi\bm P_A^u]_1(0)\}\\
&&\qquad\qquad{}+\sum_{v=0}^{s-1}\{[\bm\pi\bm P_A^r\bm P_B^v]_1(1)-[\bm\pi\bm P_A^r\bm P_B^v]_1(0)\}\bigg].\nonumber
\end{eqnarray}
In the special case $s=1$, (\ref{mu3}) takes the simpler form
\begin{equation}\label{mu4}
\mu_{[r,1]}^N={N[1-(1-1/N)^{r+1}]\over(r+1)(1-1/N)^r}\{[\bm\pi\bm P_A^r]_1(1)-[\bm\pi\bm P_A^r]_1(0)\}.
\end{equation}
\end{theorem}
\begin{remark}
Another formula for $\mu_{[r,s]}^N$, better suited to numerical computation, was given in \cite{EL12d}.
\end{remark}
\begin{proof}
Theorem~\ref{SLLN-EL09} gives the SLLN with
$$
\mu_{[r,s]}^N={1\over r+s}\sum_{v=0}^{s-1}\bm\pi^*({\bm P_A^*})^r({\bm P_B^*})^v\dot{\bm P}_B^*\bm1,
$$
where $\dot{\bm P}_B^*$ is $\bm P_B^*$ with each $q_m$ replaced by $-q_m$ and $\bm1$ is a column vector of $1$s indexed by $\Sigma^*$; here we used $\dot{\bm P}_A^*\bm1=\bm0$. Since $[\dot{\bm P}_B^*\bm1](\bm x,i)=p_{m_i(\bm x)}-q_{m_i(\bm x)}$, this and (\ref{indep}) imply (\ref{mu1}).
Next, using (\ref{mu1}) and the rotation invariance property (see Lemma~\ref{invariance} and the discussion following it), we have
\begin{eqnarray*}
\mu_{[r,s]}^N&=&{1\over r+s}\sum_{v=0}^{s-1}\sum_{\bm x\in\Sigma}[\bm\pi\bm P_A^r\bm P_B^v](\bm x){1\over N}\sum_{i=1}^N [p_{m_i(\bm x)}-q_{m_i(\bm x)}]\\
&=&{1\over r+s}\sum_{v=0}^{s-1}{1\over N}\sum_{i=1}^N\sum_{w=0}^1\sum_{z=0}^1[\bm\pi\bm P_A^r\bm P_B^v]_{i-1,i+1}(w,z)(p_{2w+z}-q_{2w+z})\\
&=&{1\over r+s}\sum_{v=0}^{s-1}\sum_{w=0}^1\sum_{z=0}^1[\bm\pi\bm P_A^r\bm P_B^v]_{1,3}(w,z)(p_{2w+z}-q_{2w+z}),
\end{eqnarray*}
which is (\ref{mu2}). In the second line, the $0,2$ and $N-1,N+1$ marginals are the $N,2$ and $N-1,1$ marginals.
Next, turning to (\ref{mu3}), we let $\{(\bm X(n),I(n))\}_{n\in{\bf Z}}$ be a nonhomogeneous Markov chain in $\Sigma^*$ with time parameter ranging over ${\bf Z}$, the set of integers, and with one-step transition matrices $\bm P_A^*$ from $(\bm X(n),I(n))$ if $n$ (mod $r+s$) belongs to $\{0,1,\ldots,r-1\}$ and $\bm P_B^*$ from $(\bm X(n),I(n))$ if $n$ (mod $r+s$) belongs to $\{r,r+1,\ldots,r+s-1\}$. Assume that $(\bm X(0),I(0))$ has distribution $\bm\pi\times{\rm uniform}\{1,2,\ldots,N\}$. Then $\{(\bm X((r+s)n+j),I((r+s)n+j))\}_{n\in{\bf Z}}$ is a stationary sequence for each $j\in{\bf Z}$ with $\bm X(j)$ having distribution, for $j=0,1,\ldots,r+s-1$,
$$
\bm\pi^j:=\begin{cases}\bm\pi\bm P_A^j&\text{if $j\in\{0,1,\ldots,r-1\}$,}\\
\bm\pi\bm P_A^r\bm P_B^{j-r}&\text{if $j\in\{r,r+1,\ldots,r+s-1\}$.}\end{cases}
$$
Therefore,
\begin{eqnarray*}
\pi_1^j(1)&=&\pi_2^j(1)=\P(X_2(j)=1)\\
&=&\sum_{k=j+1}^{j+r+s}\sum_{n=1}^\infty\P(X_2(-(r+s)n+k)=1, I(-(r+s)n+k-1)=2,\\
\noalign{\vglue-3mm}
&&\hskip1.5in{} I(-(r+s)n+k)\ne2,\ldots,I(j-1)\ne2)\\
&=&\sum_{k=j+1}^{j+r+s}\sum_{n=1}^\infty\bigg(1-{1\over N}\bigg)^{(r+s)n+j-k}\P(X_2(-(r+s)n+k)=1,\\
\noalign{\vglue-3mm}
&&\hskip2in{} I(-(r+s)n+k-1)=2)\\
&=&\sum_{k=j+1}^{j+r+s}\sum_{n=1}^\infty\bigg(1-{1\over N}\bigg)^{(r+s)n+j-k}{1\over N}\sum_{w=0}^1\sum_{z=0}^1\pi_{1,3}^{k-1}(w,z)p_{2w+z}(k),
\end{eqnarray*}
where the last equality follows by conditioning on $X_1(-(r+s)n+k-1)$ and $X_3(-(r+s)n+k-1)$; here $\pi_{1,3}^{j+r+s}:=\pi_{1,3}^j$
and $p_m(k):=p_m$ if $k-1$ (mod $r+s$) belongs to $\{r,\ldots,r+s-1\}$ and $p_m(k):=1/2$ otherwise. Using the fact that
\begin{eqnarray}\label{geometric}
&&\sum_{j=0}^{r+s-1}\sum_{k=j+1}^{j+r+s}\sum_{n=1}^\infty\bigg(1-{1\over N}\bigg)^{(r+s)n+j-k}{1\over N}\alpha(k)\\
&&\quad{}=\sum_{j=0}^{r+s-1}\sum_{l=1}^{r+s}\sum_{n=1}^\infty\bigg(1-{1\over N}\bigg)^{(r+s)n-l}{1\over N}\alpha(j+l)\nonumber\\
&&\quad{}=\sum_{l=1}^{r+s}{(1-1/N)^{r+s-l}\over N[1-(1-1/N)^{r+s}]}\sum_{j=0}^{r+s-1}\alpha(j+l)
=\sum_{l=1}^{r+s}\alpha(l)\nonumber
\end{eqnarray}
if $\alpha$ is periodic with period $r+s$, this implies that
$$
{1\over r+s}\sum_{j=0}^{r+s-1}\pi_1^j(1)={r\over r+s}\,{1\over2}+{1\over r+s}\sum_{v=0}^{s-1}\sum_{w=0}^1\sum_{z=0}^1[\bm\pi\bm P_A^r\bm P_B^v]_{1,3}(w,z)p_{2w+z}.
$$
Therefore, (\ref{mu3}) follows from (\ref{mu2}).
Finally, (\ref{mu4}) follows by replacing the sum over $j$ in (\ref{geometric}) by the $j=r$ term, assuming $s=1$:
\begin{eqnarray*}
&&\sum_{k=r+1}^{2r+1}\sum_{n=1}^\infty\bigg(1-{1\over N}\bigg)^{(r+1)n+r-k}{1\over N}\alpha(k)\\
&&\quad{}=\sum_{l=1}^{r+1}\sum_{n=1}^\infty\bigg(1-{1\over N}\bigg)^{(r+1)n-l}{1\over N}\alpha(r+l)\\
&&\quad{}=\sum_{l=1}^{r+1}{(1-1/N)^{r+1-l}\over N[1-(1-1/N)^{r+1}]}\alpha(r+l)\\
&&\quad{}={(1-1/N)^r\over N[1-(1-1/N)^{r+1}]}\alpha(r+1)+\bigg(1-{(1-1/N)^r\over N[1-(1-1/N)^{r+1}]}\bigg){1\over2}
\end{eqnarray*}
if $\alpha(1)=\cdots=\alpha(r)=\alpha(r+2)=\cdots=\alpha(2r+1)=1/2$. This implies that
$$
\pi_1^r(1)-\pi_1^r(0)={(1-1/N)^r\over N[1-(1-1/N)^{r+1}]}\sum_{w=0}^1\sum_{z=0}^1[\bm\pi\bm P_A^r]_{1,3}(w,z)(p_{2w+z}-q_{2w+z}),
$$
and, combined with (\ref{mu2}), this yields (\ref{mu4}).
\end{proof}
We conclude with an application of the SLLN.
Let us denote $\mu_B^N$, the mean profit per turn to the ensemble of $N$ players always playing game $B$, by $\mu_B^N(p_0,p_1,p_2,p_3)$ to emphasize its dependence on the parameter vector. As shown in \cite{EL12b},
\begin{equation}\label{couple B}
\mu_B^N(p_0,p_1,p_2,p_3)=-\mu_B^N(q_3,q_2,q_1,q_0),
\end{equation}
where $q_m:=1-p_m$ for $m=0,1,2,3$.
Fix $r,s\ge1$. Let us denote $\mu_{[r,s]}^N$ of Theorem~\ref{SLLN-thm} by $\mu_{[r,s]}^N(p_0,p_1,p_2,p_3)$. A similar argument (see \cite{EL12d}) implies that
\begin{equation}\label{couple [r,s]}
\mu_{[r,s]}^N(p_0,p_1,p_2,p_3)=-\mu_{[r,s]}^N(q_3,q_2,q_1,q_0).
\end{equation}
We say the \textit{Parrondo effect} is present if $\mu_B^N\le0$ and $\mu_{[r,s]}^N>0$, whereas the \textit{anti-Parrondo} effect is present if $\mu_B^N\ge0$ and $\mu_{[r,s]}^N<0$. Eqs.\ (\ref{couple B}) and (\ref{couple [r,s]}) imply that the Parrondo effect is present for the parameter vector $(p_0,p_1,p_2,p_3)$ if and only if the anti-Parrondo effect is present for the parameter vector $(q_3,q_2,q_1,q_0)$. Since the transformation
$$
\Lambda(p_0,p_1,p_2,p_3):=(1-p_3,1-p_2,1-p_1,1-p_0)
$$
from $(0,1)^4$ to $(0,1)^4$ has Jacobian identically equal to $1$, it follows that the ``Parrondo region'' and the ``anti-Parrondo region'' have the same (four-dimensional) volume.
Similarly, if we restrict attention to parameter vectors $(p_0,p_1,p_2,p_3)$ with $p_1=p_2$, then the Parrondo region and the anti-Parrondo region have the same (three-dimensional) volume.
\section{The case $p_0=1$, $p_3=0$}\label{p0=1,p3=0}
\begin{theorem}\label{p0=1,p3=0-thm}
Let $p_0=1$, $p_1=p_2\in(1/2,1)$, and $p_3=0$. Let $\mu^N_B$ (resp., $\mu_{[r,s]}^N$) denote the mean profit per turn to the ensemble of $N\ge3$ players always playing game $B$ (resp., repeatedly playing the nonrandom pattern $[r,s]$, where $r,s\ge1$). Then $\mu^N_B=0$ for all even $N\ge4$, $\mu^N_B>0$ for all odd $N\ge3$, and $\mu_{[r,1]}^N>0$ for all $N\ge3$ and $r\ge1$. In particular, the Parrondo effect is present for the nonrandom pattern $[r,s]$ if and only if $N$ is even, at least when $s=1$.
\end{theorem}
\begin{remark}
We expect that the condition $s=1$ is unnecessary for this result.
\end{remark}
\begin{proof}
The conclusions about game $B$ are from \cite{EL12c}.
Let $\pi_{1,3}^r$ be the $1,3$ two-dimensional marginal of $\bm\pi^r:=\bm\pi\bm P_A^r$ when the probability parameters are $1$, $p_1$, $p_1$, and $0$. Here $\bm\pi$ is the unique stationary distribution of $\bm P_A^r\bm P_B^s$. We apply Theorem~\ref{SLLN-thm} twice. By (\ref{mu4}) and (\ref{1,3 symm}),
\begin{eqnarray*}
\mu_{[r,1]}^N&=&{N[1-(1-1/N)^{r+1}]\over(r+1)(1-1/N)^r}\{\pi_{1,3}^r(1,0)+\pi_{1,3}^r(1,1)-[\pi_{1,3}^r(0,0)+\pi_{1,3}^r(0,1)]\}\\
&=&{N[1-(1-1/N)^{r+1}]\over(r+1)(1-1/N)^r}[\pi_{1,3}^r(1,1)-\pi_{1,3}^r(0,0)].\nonumber
\end{eqnarray*}
By (\ref{mu2}), (\ref{1,3 symm}), and the preceding formula,
\begin{eqnarray*}
\mu_{[r,1]}^N&=&(r+1)^{-1}[\pi_{1,3}^r(0,0)(1)+2\pi_{1,3}^r(0,1)(2p_1-1)+\pi_{1,3}^r(1,1)(-1)]\\
&=&{2(2p_1-1)\over r+1}\pi_{1,3}^r(0,1)-{(1-1/N)^r\over N[1-(1-1/N)^{r+1}]}\mu_{[r,1]}^N.
\end{eqnarray*}
Therefore,
$$
\mu_{[r,1]}^N={2(2p_1-1)\over r+1}\bigg(1+{(1-1/N)^r\over N[1-(1-1/N)^{r+1}]}\bigg)^{-1}\pi_{1,3}^r(0,1),
$$
and this is positive by the irreducibility of the Markov chain with transition matrix $\bm P_r:=\bm P_B\bm P_A^r$ (Lemma \ref{ergodic}) and the assumption that $p_1>1/2$.
\end{proof}
\section{A spin system}\label{spin}
As shown in \cite{EL12c}, the discrete-time Markov chain for game $B$ converges in distribution, after rescaling its time parameter, to a \textit{spin system} on the one-dimensional integer lattice ${\bf Z}$. Let us recall the limiting process as described by its generator. Its state space is the product space
\begin{equation*}
\{0,1\}^{\bf Z}:=\{\bm x=(\ldots,x_{-2},x_{-1},x_0,x_1,x_2,\ldots): x_i\in\{0,1\}{\rm\ for\ all\ }i\in{\bf Z}\}.
\end{equation*}
We will usually refer to $x_i$ as the status (loser or winner, 0 or 1) of player $i$; occasionally, it will be convenient to refer to it as the spin at site $i$. Let $m_i(\bm x):=2x_{i-1}+x_{i+1}$ as before but without the boundary conditions. Also, let $\bm x^i$ be the element of $\{0,1\}^{\bf Z}$ equal to $\bm x$ except at the $i$th component; for example, $\bm x^0:=(\ldots,x_{-2},x_{-1},1-x_0,x_1,x_2,\ldots)$.
The generator depends on the four probability parameters $p_0,p_1,p_2,p_3\in[0,1]$, and it has the form
\begin{eqnarray}\label{L_B}
(\mathscr{L}_Bf)(\bm x)&:=&\sum_{i\in{\bf Z}}c_i(\bm x)[f(\bm x^i)-f(\bm x)]
\end{eqnarray}
for functions $f$ depending on only finitely many components, where the \textit{flip rates} are given by
\begin{equation}\label{rates}
c_i(\bm x):=\begin{cases}p_{m_i(\bm x)}&\text{if $x_i=0$,}\\q_{m_i(\bm x)}&\text{if $x_i=1$,}
\end{cases}
\end{equation}
and $q_m:=1-p_m$ for $m=0,1,2,3$. It can be shown that the functions depending on only finitely many components form a core for the generator of the Feller semigroup associated with the process.
For later use let us also define
\begin{equation}\label{L_A}
(\mathscr{L}_Af)(\bm x):=\sum_{i\in{\bf Z}}{1\over2}[f(\bm x^i)-f(\bm x)],
\end{equation}
which is just the special case of (\ref{L_B}) with $p_0=p_1=p_2=p_3=1/2$.
Next we would like to clarify the statement that this spin system is the limit in distribution of the $N$-player chain for game $B$ after an appropriate time change. First, it is convenient to relabel the $N$ players. Instead of labeling them from 1 to $N$, we label them from $l_N$ to $r_N$, where
$$
l_N:=\begin{cases}-(N-1)/2&\text{if $N$ is odd,}\\
-N/2&\text{if $N$ is even,}
\end{cases}\;\;\text{and}\;\;
r_N:=\begin{cases}(N-1)/2&\text{if $N$ is odd,}\\
N/2-1&\text{if $N$ is even,}
\end{cases}
$$
with the understanding that players $l_N$ and $r_N$ are nearest neighbors. The state space is
\begin{equation*}
\Sigma_N:=\{\bm x=(x_{l_N},\ldots,x_{-1},x_0,x_1,\ldots,x_{r_N}): x_i\in\{0,1\}{\rm\ for\ }i=l_N,\ldots,r_N\}.
\end{equation*}
(This is what we previously called $\Sigma$ but with the players relabeled. To avoid confusion, we make the dependence on $N$ explicit in the notation.) We also speed up time in the $N$-player model so that $N$ one-step transitions occur per unit of time. The resulting discrete generator has the form
\begin{eqnarray*}
(\mathscr{L}_B^Nf)(\bm x)&:=&N\E[f(\bm X_N(1))-f(\bm x)\mid \bm X_N(0)=\bm x]
\end{eqnarray*}
where $x_{l_N-1}:=x_{r_N}$ and $x_{r_N+1}:=x_{l_N}$.
Consequently, if we define $\zeta_N:\Sigma_N\mapsto\{0,1\}^{\bf Z}$ by
\begin{equation}\label{zeta}
\zeta_N(x_{l_N},\ldots,x_{r_N}):=(\ldots,0,0,x_{l_N},\ldots,x_{r_N},0,0,\ldots),
\end{equation}
then $\mathscr{L}_B^N(f\circ\zeta_N)=(\mathscr{L}_Bf)\circ\zeta_N$ for all $\bm x\in\Sigma_N$ and $N\ge2K+4$, where $f(\bm x)$ depends on $\bm x$ only through the $2K+1$ components $x_i$, $-K\le i\le K$.
This shows that, if the spin system has a unique stationary distribution, then the unique stationary distribution of the $N$-player Markov chain (assumed ergodic in the sense of Lemma~1 of \cite{EL12c}), converges to it in the topology of weak convergence (essentially Proposition I.2.14 of Liggett \cite{L85}). Let us assume that the spin system has a unique stationary distribution $\pi$, and let us denote the unique stationary distribution of the $N$-player Markov chain by $\pi^N$. (We previously denoted the latter by $\bm\pi$ but now it is necessary to make the dependence on $N$ explicit. We do not use boldface for $\pi^N$ or $\pi$ because it is no longer useful or possible, respectively, to think of them as row vectors.) The above argument shows that $\pi^N\zeta_N^{-1}\Rightarrow\pi$. Let us denote their $-1,1$ two-dimensional marginals by $(\pi^N)_{-1,1}$ and $\pi_{-1,1}$, so that $(\pi^N)_{-1,1}\Rightarrow\pi_{-1,1}$ and
\begin{equation*}
\sum_{w=0}^1\sum_{z=0}^1(\pi^N)_{-1,1}(w,z)p_{2w+z}\to\sum_{w=0}^1\sum_{z=0}^1\pi_{-1,1}(w,z)p_{2w+z}.
\end{equation*}
Hence $\mu_B^N$, the mean profit per turn to the ensemble of $N$ players always playing game $B$, converges as $N\to\infty$ to a limit that can be expressed in terms of the spin system.
Under what conditions does the spin system have a unique stationary distribution (equivalently, a unique invariant probability measure)? In \cite{EL12c} we gave sufficient conditions for the spin system to be \textit{ergodic}, which means not only that there is a unique stationary distribution $\pi$ but that the process at time $t$ converges in distribution to $\pi$ as $t\to\infty$, regardless of the initial distribution.
\begin{theorem}\label{ergodicity}
With $p_0,p_1,p_2,p_3\in[0,1]$, the spin system on ${\bf Z}$ with flip rates (\ref{rates}) is ergodic if at least one of the following four conditions is satisfied:
\emph{(a)} (basic estimate applies)
\begin{equation*}
\max(|p_0-p_1|,|p_2-p_3|)+\max(|p_0-p_2|,|p_1-p_3|)<1;
\end{equation*}
\emph{(b)} (attractiveness or repulsiveness applies)
\begin{equation*}
0<\min(p_0,p_3)\le \min(p_1,p_2)\le\max(p_1,p_2)\le \max(p_0,p_3)<1;
\end{equation*}
\emph{(c)} (coalescing duality applies)
\begin{equation*}
\;\;\max(p_1,p_2,p_3,p_1+p_2-p_3)-p_3<p_0/2<\min(p_1,p_2,p_3,p_1+p_2-p_3);
\end{equation*}
\emph{(d)} (annihilating duality applies)
\begin{equation*}
p_0,p_1,p_2,p_3\in(2\overline{p}-1,2\overline{p})\cap(0,1),\quad \overline{p}:=(p_0+p_1+p_2+p_3)/4.
\end{equation*}
\end{theorem}
See \cite{EL12c} for further discussion. The following result is immediate.
\begin{theorem}\label{lim mu^N}
Assume that $(p_0,p_1,p_2,p_3)$ is such that we can define $\mu_B^N=\mu_B^N(p_0,p_1,p_2,p_3)$ for all $N\ge3$ (this requires that the conditions for ergodicity in Lemma 1 of \cite{EL12c} are satisfied). Assume also that the spin system on ${\bf Z}$ with flip rates (\ref{rates}) is ergodic (see Theorem~\ref{ergodicity} for sufficient conditions) with unique stationary distribution $\pi$. Then $\lim_{N\to\infty}\mu_B^N=\mu_B$, where
$$
\mu_B:=\sum_{w=0}^1\sum_{z=0}^1\pi_{-1,1}(w,z)(p_{2w+z}-q_{2w+z}).
$$
Let $0<\gamma<1$ and assume that $(p_0,p_1,p_2,p_3)$ is such that we can define
$$
\mu_{(\gamma,1-\gamma)}^N:=\mu_B^N(p_0(\gamma),p_1(\gamma),p_2(\gamma),p_3(\gamma)),
$$
for all $N\ge3$, where
\begin{equation}\label{p_m()}
p_m(\gamma):=\gamma(1/2)+(1-\gamma)p_m,\qquad m=0,1,2,3,
\end{equation}
and that the spin system on ${\bf Z}$ with flip rates of the form (\ref{rates}) but with $(p_0,p_1,p_2,\linebreak p_3)$ replaced by $(p_0(\gamma),p_1(\gamma),p_2(\gamma),p_3(\gamma))$ is ergodic with unique stationary distribution $\pi^\gamma$. Then $\lim_{N\to\infty}\mu_{(\gamma,1-\gamma)}^N=\mu_{(\gamma,1-\gamma)}$, where
\begin{equation}\label{mu_()}
\mu_{(\gamma,1-\gamma)}:=(1-\gamma)\sum_{w=0}^1\sum_{z=0}^1(\pi^\gamma)_{-1,1}(w,z)(p_{2w+z}-q_{2w+z}).
\end{equation}
\end{theorem}
We notice that condition (a) of Theorem \ref{ergodicity} holds with $(p_0,p_1,p_2,p_3)$ replaced by $(p_0(\gamma),p_1(\gamma),p_2(\gamma),p_3(\gamma))$ if
$$
\max(|p_0-p_1|,|p_2-p_3|)+\max(|p_0-p_2|,|p_1-p_3|)<1/(1-\gamma);
$$
this is automatic if $\gamma>1/2$.
The special case of Theorem \ref{lim mu^N} in which $\gamma=1/2$ was included in \cite{EL12c}.
\section{Convergence of means}\label{limit}
We turn to our main result, namely that $\lim_{N\to\infty}\mu_{[r,s]}^N$ exists under certain conditions.
\begin{theorem}
Fix $r,s\ge1$ and put $\gamma:=r/(r+s)$. Assume that the spin system on ${\bf Z}$ with flip rates of the form (\ref{rates}) but with $(p_0,p_1,p_2,p_3)$ replaced by $(p_0(\gamma),p_1(\gamma),p_2(\gamma),p_3(\gamma))$ (see (\ref{p_m()})) is ergodic with unique stationary distribution $\pi^\gamma$. Then $\lim_{N\to\infty}\mu_{[r,s]}^N=\mu_{(\gamma,1-\gamma)}$, where $\mu_{(\gamma,1-\gamma)}$ is as in (\ref{mu_()}).
\end{theorem}
\begin{proof}
Define $\zeta_N:\Sigma_N\mapsto\{0,1\}^{\bf Z}$ by (\ref{zeta}). The main step is to show that the discrete generator $\mathscr{L}_{[r,s]}^N$, corresponding to the nonrandom pattern $[r,s]$ (and with $N$ games played per unit of time), satisfies
\begin{equation}\label{convergence-generators}
\mathscr{L}_{[r,s]}^N(f\circ\zeta_N)=[(r+s)^{-1}(r\mathscr{L}_A+s\mathscr{L}_B)f]\circ\zeta_N+O(N^{-1}),
\end{equation}
uniformly over $\Sigma_N$, for all $f$ depending on only finitely many components, where $\mathscr{L}_A$ and $\mathscr{L}_B$ are as in (\ref{L_A}) and (\ref{L_B}).
Because the result is nonintuitive and the proof is technical, we treat the case $r=s=1$ first. We hope this slight redundancy will improve clarity.
In the case $r=s=1$, the discrete generator has the form
$$
(\mathscr{L}_{[1,1]}^Nf)(\bm x):={N\over2}\sum_{\bm z}[f(\bm z)-f(\bm x)](\bm P_A\bm P_B)(\bm x,\bm z).
$$
To evaluate this, we will need
\begin{eqnarray*}
P_A(\bm x,\bm y)&=&{1\over2}\delta(\bm x,\bm y)+{1\over2N}\sum_i\delta(\bm x^i,\bm y),\\
P_B(\bm y,\bm z)&=&{1\over N}\sum_j[1-c_j(\bm y)]\delta(\bm y,\bm z)+{1\over N}\sum_j c_j(\bm y)\delta(\bm y^j,\bm z),
\end{eqnarray*}
where $\delta(\bm x,\bm y)$ is the Kronecker delta, which equals 1 if $\bm x=\bm y$ and equals 0 otherwise; the sums over $i$ and $j$ range over $\{l_N,\ldots,r_N\}$; and $c_j(\bm y)$ is as in (\ref{rates}). This tells us that
\begin{eqnarray*}
(\bm P_A\bm P_B)(\bm x,\bm z)&=&\sum_{\bm y}P_A(\bm x,\bm y)P_B(\bm y,\bm z)\\
&=&{1\over2N}\sum_j[1-c_j(\bm x)]\delta(\bm x,\bm z)+{1\over2N^2}\sum_i\sum_j[1-c_j(\bm x^i)]\delta(\bm x^i,\bm z)\\
&&\quad{}+{1\over2N}\sum_j c_j(\bm x)\delta(\bm x^j,\bm z)+{1\over2N^2}\sum_i\sum_j c_j(\bm x^i)\delta(\bm x^{ij},\bm z),
\end{eqnarray*}
where $\bm x^{ij}:=(\bm x^i)^j=(\bm x^j)^i$, so our discrete generator reduces to
\begin{eqnarray*}
(\mathscr{L}_{[1,1]}^Nf)(\bm x)&=&{1\over4N}\sum_i\sum_j[1-c_j(\bm x^i)][f(\bm x^i)-f(\bm x)]\\
&&\quad{}+{1\over4}\sum_jc_j(\bm x)[f(\bm x^j)-f(\bm x)]\\
&&\quad{}+{1\over4N}\sum_i\sum_jc_j(\bm x^i)[f(\bm x^{ij})-f(\bm x)].
\end{eqnarray*}
Now let us restrict attention to those functions $f$ that depend on only the coordinates $x_{-(K-1)},\ldots,x_{K-1}$ for some positive integer $K$. Then we can write the discrete generator as
\begin{eqnarray*}
(\mathscr{L}_{[1,1]}^Nf)(\bm x)&=&{1\over4N}\sum_{|i|\le K}\sum_{|j|\le K}[1-c_j(\bm x^i)][f(\bm x^i)-f(\bm x)]\\
&&\quad{}+{1\over4N}\sum_{|i|\le K}\sum_{|j|>K}[1-c_j(\bm x^i)][f(\bm x^i)-f(\bm x)]\\
&&\quad{}+{1\over4}\sum_{|j|\le K}c_j(\bm x)[f(\bm x^j)-f(\bm x)]\\
&&\quad{}+{1\over4N}\sum_{|i|\le K}\sum_{|j|\le K}c_j(\bm x^i)[f(\bm x^{ij})-f(\bm x)]\\
&&\quad{}+{1\over4N}\sum_{|i|\le K}\sum_{|j|>K}c_j(\bm x^i)[f(\bm x^i)-f(\bm x)]\\
&&\quad{}+{1\over4N}\sum_{|i|>K}\sum_{|j|\le K}c_j(\bm x^i)[f(\bm x^j)-f(\bm x)].
\end{eqnarray*}
Let us refer to the six terms of the expression on the right as terms 1--6. Terms 2 and 5 combine to give
\begin{eqnarray*}
{1\over4N}\sum_{|i|\le K}\sum_{|j|>K}[f(\bm x^i)-f(\bm x)]&=&{1\over4}\sum_{|i|\le K}{N-(2K+1)\over N}[f(\bm x^i)-f(\bm x)]\\
&=&{1\over4}\sum_{|i|\le K}[f(\bm x^i)-f(\bm x)]+O(N^{-1}).
\end{eqnarray*}
Term 3 is constant and term 6 simplifies to
\begin{eqnarray*}
{1\over4N}\sum_{|i|>K}\sum_{|j|\le K}c_j(\bm x^i)[f(\bm x^j)-f(\bm x)]&=&{1\over4N}\sum_{|i|>K}\sum_{|j|\le K}c_j(\bm x)[f(\bm x^j)-f(\bm x)]\\
&=&{N-(2K+1)\over4N}\sum_{|j|\le K}c_j(\bm x)[f(\bm x^j)-f(\bm x)]\\
&=&{1\over4}\sum_{|j|\le K}c_j(\bm x)[f(\bm x^j)-f(\bm x)]+O(N^{-1})
\end{eqnarray*}
because $c_j(\bm x^i)=c_j(\bm x)$ if $|i|>K$ and $|j|\le K$, with a possible exception when $|i|=K+1$, $|j|=K$, and $i$ and $j$ have the same sign, in which case $f(\bm x^j)-f(\bm x)=0$. Finally, terms 1 and 4 are $O(N^{-1})$, so we conclude that
\begin{eqnarray*}
(\mathscr{L}_{[1,1]}^Nf)(\bm x)&=&\sum_{|j|\le K}\bigg({1\over4}+{1\over2}c_j(\bm x)\bigg)[f(\bm x^j)-f(\bm x)]+O(N^{-1})\\
&=&\sum_j\bigg({1\over4}+{1\over2}c_j(\bm x)\bigg)[f(\bm x^j)-f(\bm x)]+O(N^{-1})\\
&=&{1\over2}(\mathscr{L}_Af+\mathscr{L}_Bf)(\zeta_N(\bm x))+O(N^{-1}),
\end{eqnarray*}
which leads to (\ref{convergence-generators}) with $r=s=1$.
The general case should now be easier to follow.\footnote{By convention, $\prod_{i=1}^n a_ib$ equals $(a_1a_2\cdots a_n)b$, not $(a_1a_2\cdots a_n)b^n$.} Given $r,s\ge1$, we evaluate
\begin{eqnarray*}
&&[\bm P_A^r\bm P_B^s](\bm x_0,\bm x_{r+s})\\
&&\;{}=\sum_{\bm x_1,\ldots,\bm x_{r+s-1}}\prod_{u=1}^rP_A(\bm x_{u-1},\bm x_u)\prod_{u=r+1}^{r+s}P_B(\bm x_{u-1},\bm x_u)\\
&&\;{}=\sum_{\bm x_1,\ldots,\bm x_{r+s-1}}\prod_{u=1}^r\bigg[{1\over2}\delta(\bm x_{u-1},\bm x_u)+{1\over2N}\sum_{i_u}\delta(\bm x_{u-1}^{i_u},\bm x_u)\bigg]\\
&&\quad\;\;{}\cdot\prod_{u=r+1}^{r+s}\bigg[{1\over N}\sum_{i_u}[1-c_{i_u}(\bm x_{u-1})]\delta(\bm x_{u-1},\bm x_u)+{1\over N}\sum_{i_u}c_{i_u}(\bm x_{u-1})\delta(\bm x_{u-1}^{i_u},\bm x_u)\bigg]\\
&&\;{}={1\over2^r}\sum_{A\subset\{1,\ldots,r\}}\sum_{B\subset\{r+1,\ldots,r+s\}}\sum_{\bm x_1,\ldots,\bm x_{r+s-1}}\prod_{u\in A^c}\delta(\bm x_{u-1},\bm x_u)\\
&&\quad\;\;{}\cdot\prod_{u\in A}\bigg[{1\over N}\sum_{i_u}\delta(\bm x_{u-1}^{i_u},\bm x_u)\bigg]\prod_{u\in B^c}\bigg[{1\over N}\sum_{i_u}[1-c_{i_u}(\bm x_{u-1})]\delta(\bm x_{u-1},\bm x_u)\bigg]\\
&&\quad\;\;{}\cdot\prod_{u\in B}\bigg[{1\over N}\sum_{i_u}c_{i_u}(\bm x_{u-1})\delta(\bm x_{u-1}^{i_u},\bm x_u)\bigg]\\
&&\;{}={1\over2^r}\sum_{A\subset\{1,\ldots,r\}}{1\over N^{|A|+s}}\sum_{B\subset\{r+1,\ldots,r+s\}}\sum_{\bm x_1,\ldots,\bm x_{r+s-1}}\\
&&\quad\;\;{}\cdot\sum_{i_u:u\in A}\sum_{i_u:u\in B^c}\sum_{i_u:u\in B}\prod_{v\in B^c}[1-c_{i_v}(\bm x_{v-1})]\prod_{v\in B}c_{i_v}(\bm x_{v-1})\\
&&\quad\;\;{}\cdot\prod_{v\in A^c\cup B^c}\delta(\bm x_{v-1},\bm x_v)\prod_{v\in A\cup B}\delta(\bm x_{v-1}^{i_v},\bm x_v)\\
&&\;{}={1\over2^r}\sum_{A\subset\{1,\ldots,r\}}{1\over N^{|A|+s}}\sum_{B\subset\{r+1,\ldots,r+s\}}\sum_{i_u:u\in A\cup \{r+1,\ldots,r+s\}}\\
&&\quad\;\;{}\cdot\prod_{v\in B^c}[1-c_{i_v}(\bm x_0^{\{i_w:w\in A\cup B, w<v\}})]\prod_{v\in B}c_{i_v}(\bm x_0^{\{i_w:w\in A\cup B, w<v\}})\\
&&\quad\;\;{}\cdot\delta(\bm x_0^{\{i_v:v\in A\cup B\}},\bm x_{r+s});
\end{eqnarray*}
here $A^c:=\{1,\ldots,r\}-A$ and $B^c:=\{r+1,\ldots,r+s\}-B$; also, $\bm x_0^{\{i_v:v\in A\cup B\}}$, for example, denotes $\bm x_0$ with the spin flipped at each site $i_v$ with $v\in A\cup B$; these site labels are not necessarily distinct, so if there are multiple flips at a single site, only the parity of the number of flips is relevant.
With $f(\bm x)$ depending only on $x_{-(K-1)},\ldots,x_{K-1}$ for some positive integer $K$, this leads to
\begin{eqnarray}\label{Lf}
(\mathscr{L}_{[r,s]}^N f)(\bm x_0)
&=&{N\over r+s}\sum_{\bm x_{r+s}}[f(\bm x_{r+s})-f(\bm x_0)][\bm P_A^r\bm P_B^s](\bm x_0,\bm x_{r+s})\nonumber\\
&=&{N\over r+s}\,{1\over2^r}\sum_{A\subset\{1,\ldots,r\}}{1\over N^{|A|+s}}\sum_{B\subset\{r+1,\ldots,r+s\}}\sum_{i_u:u\in A\cup\{r+1,\ldots,r+s\}}\\
&&\;\;{}\cdot\prod_{v\in B^c}[1-c_{i_v}(\bm x_0^{\{i_w:w\in A\cup B, w<v\}})]\prod_{v\in B}c_{i_v}(\bm x_0^{\{i_w:w\in A\cup B, w<v\}})\nonumber\\
&&\;\;{}\cdot[f(\bm x_0^{\{i_v:v\in A\cup B\}})-f(\bm x_0)].\nonumber
\end{eqnarray}
Now, with error at most $O(N^{-1})$, we can replace $\sum_{i_u:u\in A\cup\{r+1,\ldots,r+s\}}$ by
\begin{eqnarray}\label{A,B}
&&\sum_{u\in A}\sum_{|i_u|\le K}\sum_{|i_z|>K:z\in A\cup\{r+1,\ldots,r+s\},z\ne u}\\
&&\quad{}+\sum_{u\in B}\sum_{|i_u|\le K}\sum_{|i_z|>K:z\in A\cup\{r+1,\ldots,r+s\},z\ne u}.\nonumber
\end{eqnarray}
The justification is that each sum $\sum_{i_u}$ can be written as $\sum_{|i_u|\le K}+\sum_{|i_u|>K}$, resulting in $2^{|A|+s}$ multiple sums. But each of those multiple sums with two or more sums of the form $\sum_{|i_u|\le K}$ contributes $O(N^{-1})$, and those with no sums of the form $\sum_{|i_u|\le K}$, where $u\in A\cup B$, are 0.
But before evaluating the result, let us make one more simplification. We replace the argument of $c_{i_v}$ in (\ref{Lf}) by just $\bm x_0$. Here the justification is that $c_{i_v}(\bm x_0^{\{i_w:w\in A\cup B, w<v\}})=c_{i_v}(\bm x_0)$ for all but at most $3(r+s)$ of the $N$ possible values of $i_v$ (namely, $i_w-1,i_w,i_w+1$ for $w=1,2,\ldots,r+s$), hence the approximation introduces an error that is $O(N^{-1})$. The result is that $(\mathscr{L}_{[r,s]}^N f)(\bm x_0)$ can be written as the sum of two terms corresponding to the two multiple sums in (\ref{A,B}), plus $O(N^{-1})$.
The term corresponding to the first multiple sum in (\ref{A,B}) is, up to $O(N^{-1})$,
\begin{eqnarray}\label{A term}
&&{N\over r+s}\,{1\over2^r}\sum_{A\subset\{1,\ldots,r\}}{1\over N^{|A|+s}}\sum_{B\subset\{r+1,\ldots,r+s\}}\sum_{u\in A}\sum_{|i_u|\le K}[f(\bm x_0^{i_u})-f(\bm x_0)]\\
&&\;{}\cdot\sum_{|i_z|>K:z\in A\cup\{r+1,\ldots,r+s\},z\ne u}\prod_{v\in B^c}[1-c_{i_v}(\bm x_0)]\prod_{v\in B}c_{i_v}(\bm x_0).\nonumber
\end{eqnarray}
Now since
\begin{eqnarray*}
\sum_{B\subset\{r+1,\ldots,r+s\}}\prod_{v\in B^c}[1-c_{i_v}(\bm x_0)]\prod_{v\in B}c_{i_v}(\bm x_0)=\prod_{v=r+1}^{r+s}[1-c_{i_v}(\bm x_0)+c_{i_v}(\bm x_0)]=1
\end{eqnarray*}
and since
$$
{1\over2^r}\sum_{A\subset\{1,\ldots,r\}}|A|=\sum_{k=0}^rk{r\choose k}2^{-r}={r\over2},
$$
(\ref{A term}) becomes, up to $O(N^{-1})$,
$$
{r\over r+s}\sum_{|i|\le K}{1\over2}[f(\bm x_0^i)-f(\bm x_0)]={r\over r+s}(\mathscr{L}_A f)(\zeta_N(\bm x_0)).
$$
The term corresponding to the second multiple sum in (\ref{A,B}) is, up to $O(N^{-1})$,
\begin{eqnarray}\label{B term}
&&{N\over r+s}\,{1\over2^r}\sum_{A\subset\{1,\ldots,r\}}{1\over N^{|A|+s}}\sum_{B\subset\{r+1,\ldots,r+s\}}\sum_{u\in B}\sum_{|i_u|\le K}[f(\bm x_0^{i_u})-f(\bm x_0)]\\
&&\;\;{}\cdot\sum_{|i_z|>K:z\in A\cup\{r+1,\ldots,r+s\},z\ne u}\prod_{v\in B^c}[1-c_{i_v}(\bm x_0)]\prod_{v\in B}c_{i_v}(\bm x_0).\nonumber
\end{eqnarray}
Now
$$
\sum_{B\subset\{r+1,\ldots,r+s\}}\sum_{u\in B}=\sum_{u=r+1}^{r+s}\sum_{B\subset\{r+1,\ldots,r+s\}:u\in B},
$$
so (\ref{B term}) becomes
\begin{eqnarray*}
&&{N\over r+s}\,{1\over2^r}\sum_{A\subset\{1,\ldots,r\}}{1\over N^{|A|+s}}\sum_{u=r+1}^{r+s}\sum_{|i_u|\le K}c_{i_u}(\bm x_0)[f(\bm x_0^{i_u})-f(\bm x_0)]\nonumber\\
&&\;\;{}\cdot\sum_{|i_z|>K:z\in A\cup \{r+1,\ldots,r+s\},z\ne u}\sum_{B\subset\{r+1,\ldots,r+s\}:u\in B}\\
&&\;\;{}\cdot\prod_{v\in B^c}[1-c_{i_v}(\bm x_0)]\prod_{v\in B-\{u\}}c_{i_v}(\bm x_0)\\
&&{}={1\over r+s}\sum_{u=r+1}^{r+s}\sum_{|i_u|\le K}c_{i_u}(\bm x_0)[f(\bm x_0^{i_u})-f(\bm x_0)]+O(N^{-1})\\
&&{}={s\over r+s}(\mathscr{L}_B f)(\zeta_N(\bm x_0))+O(N^{-1}).
\end{eqnarray*}
Let us replace $f$ by $f\circ\zeta_N$, where $f\in C(\{0,1\}^{{\bf Z}})$ and $f(\bm x)$ depends on only the components $x_{-(K-1)},\ldots,x_{K-1}$.
We conclude that (\ref{convergence-generators}) holds,
uniformly over $\Sigma_N$, which ensures that the unique stationary distribution $\bm\pi^N$ of $\bm P_A^r\bm P_B^s$ converges weakly to the unique stationary distribution $\pi^{r/(r+s)}$ of the spin system with generator $\mathscr{L}_B$ but with $(p_0,p_1, p_2,p_3)$ replaced by
$(p_0(\gamma), p_1(\gamma),\linebreak p_2(\gamma),p_3(\gamma))$, where $\gamma:=r/(r+s)$, provided ergodicity holds for the limiting spin system.
The mean profit per turn to the ensemble of $N$ players playing the nonrandom periodic pattern $A^rB^s$ is, according to Theorem \ref{SLLN-thm},
\begin{equation}\label{mean}
\mu_{[r,s]}^N={1\over r+s}\sum_{v=0}^{s-1}\sum_{\bm x\in\Sigma}[\bm\pi^N\bm P_A^r\bm P_B^v](\bm x){1\over N}\sum_{i=1}^N [p_{m_i(\bm x)}-q_{m_i(\bm x)}].
\end{equation}
Now term $v$ of the sum in (\ref{mean}) can be expressed as
\begin{eqnarray*}
&&\sum_{\bm x_0,\bm x}\pi^N(\bm x_0)[\bm P_A^r\bm P_B^v](\bm x_0,\bm x){1\over N}\sum_l[p_{m_l(\bm x)}-q_{m_l(\bm x)}]\\
&=&{1\over2^r}\sum_{\bm x_0}\pi^N(\bm x_0)\sum_{A\subset\{1,\ldots,r\}}{1\over N^{|A|+v}}\sum_{B\subset\{r+1,\ldots,r+v\}}\sum_{i_u:u\in A\cup \{r+1,\ldots,r+v\}}\\
&&\;\;{}\cdot\prod_{w\in B^c}[1-c_{i_w}(\bm x_0^{\{i_z:z\in A\cup B, z<w\}})]\prod_{w\in B}c_{i_w}(\bm x_0^{\{i_z:z\in A\cup B, z<w\}})\\
&&\;\;{}\cdot {1\over N}\sum_l[p_{m_l(\bm x_0^{\{i_w:w\in A\cup B\}})}-q_{m_l(\bm x_0^{\{i_w:w\in A\cup B\}})}]\\
&=&{1\over2^r}\sum_{\bm x_0}\pi^N(\bm x_0)\sum_{A\subset\{1,\ldots,r\}}{1\over N^{|A|+v}}\sum_{B\subset\{r+1,\ldots,r+v\}}\sum_{i_u:u\in A\cup \{r+1,\ldots,r+v\}}\\
&&\;\;{}\cdot\prod_{w\in B^c}[1-c_{i_w}(\bm x_0)]\prod_{w\in B}c_{i_w}(\bm x_0){1\over N}\sum_l[p_{m_l(\bm x_0)}-q_{m_l(\bm x_0)}]+O(N^{-1})\\
&=&{1\over N}\sum_{\bm x_0}\pi^N(\bm x_0)\sum_l[p_{m_l(\bm x_0)}-q_{m_l(\bm x_0)}]+O(N^{-1})\\
&=&\sum_{w=0}^1\sum_{z=0}^1(\pi^N)_{-1,1}(w,z)(p_{2w+z}-q_{2w+z})+O(N^{-1})\\
&=&\sum_{w=0}^1\sum_{z=0}^1(\pi^{r/(r+s)})_{-1,1}(w,z)(p_{2w+z}-q_{2w+z})+o(1).
\end{eqnarray*}
Hence, using (\ref{p_m()}) with $\gamma:=r/(r+s)$, we have
$$
\mu_{[r,s]}^N\to(1-\gamma)\sum_{w=0}^1\sum_{z=0}^1(\pi^\gamma)_{-1,1}(w,z)(p_{2w+z}-q_{2w+z})=\mu_{(\gamma,1-\gamma)},
$$
as required.
\end{proof}
|
{
"timestamp": "2012-06-29T02:01:43",
"yymm": "1206",
"arxiv_id": "1206.6567",
"language": "en",
"url": "https://arxiv.org/abs/1206.6567"
}
|
\subsection*{Background}
\paragraph*{Linear logic, stratification and computational complexity}
At the heart of our work there is the so-called Curry-Howard correspondence, which sees logical proofs as programs, and cut-elimination as their execution. From this perspective, it is not so much the expressiveness of a logical system \emph{as a language} which matters, but the complexity of its cut-elimination procedure: if a logical system has a low-complexity cut-elimination, its proofs will necessarily correspond to low-complexity programs. This approach, which has a marked proof-theoretic nature and, as such, is orthogonal to the model-theoretic methods of descriptive complexity, falls within the larger area of \emph{implicit computational complexity}, whose concrete aim is to define programming languages enjoying intrinsic complexity bounds, \textit{i.e.}\xspace, automatically ensured at compile time. Apart from those already mentioned above, other notable examples of work in this field, not necessarily related to logic, are given by \cite{BellantoniCook92,Jones,Hofmann,Schwichtemberg}.
The use of linear logic as a tool for developing a Curry-Howard-based approach to implicit computational complexity was initiated by \cite{GirardScedrovScott92} and perfected by \cite{Girard:LLL}. The central idea of this latter work is that the complexity of the cut-elimination procedure is mostly owed to the presence of structural rules, in particular the contraction rule. Indeed, the cut-elimination procedure, which is in general non-elementary in the size of proofs \citep{Statman}, becomes quite manageable (\textit{e.g.}\ quadratic) in substructural logical systems lacking the contraction rule \citep{Girard:LLL}. In linear logic, structural rules are managed by the so-called \emph{exponential} modalities. Girard showed that altering the behavior of these modalities offers a way to define logical systems in which cut-elimination is still feasible (or at most elementary) in spite of the presence of the contraction rule: \emph{light linear logic} (\ensuremath{\mathbf{LLL}}) exactly captures deterministic polynomial time, and \emph{elementary linear logic} (\ensuremath{\mathbf{ELL}}) exactly captures elementary time.\footnote{We refer here to the Curry-Howard sense of ``capturing'': in these systems, there is a formula $F$ representing functions from binary strings to binary strings such that a proof of $F$ corresponds to a function in the given complexity class and, conversely, every function in that class may be represented by a proof of $F$.}
The restriction that Girard imposed on the exponential modalities of linear logic is a form of \emph{stratification}. Basically, the rules of linear logic are modified so that the nesting level of exponential modalities, called \emph{depth}, may not be changed during cut-elimination. Therefore, a proof may be seen as partitioned into ``strata'', one for each depth, which never interact through cut-elimination.
We observe that this is not the only use of stratification in implicit computational complexity. For example, \cite{LeivantMarion} introduced \emph{tiers}, which are integers assigned to subterms of \mbox{$\lambda$-term} s, to induce a stratification on the \mbox{$\lambda$-calculus}\xspace, yielding characterizations of interesting complexity classes.
\paragraph*{Separating stratification from exponential depth}
Recently, \cite{BaillotMazza:LLlev} proposed a new subsystem of linear logic corresponding to elementary time, \emph{linear logic by levels} (\ensuremath{\mathbf{L^3}}). This system is also based on a form of stratification, but in this case it is achieved by retaining only those linear logic proofs $\pi$ for which there exists a function from the occurrences of formulas in $\pi$ to the integers, called \emph{indexing}, which satisfies certain conditions. In a nutshell, these conditions state that axioms introduce dual occurrences of identical level, and that the level of an occurrence of formula is decreased only when it is the principal occurrence of a rule introducing an exponential modality.
Interestingly, this form of stratification turns out to be a generalization of Girard's stratification: \ensuremath{\mathbf{ELL}}\ is exactly the subsystem of \ensuremath{\mathbf{L^3}}\ in which the function assigning to each occurrence its own depth is a valid indexing. This generalization is strict, both in the sense of proofs and provability: there exist \ensuremath{\mathbf{ELL}}-provable formulas which admit more proofs in \ensuremath{\mathbf{L^3}}, and there exist \ensuremath{\mathbf{L^3}}-provable formulas which are not provable in \ensuremath{\mathbf{ELL}}. Although no concrete use has currently been found for these additional formulas and proofs, \ensuremath{\mathbf{L^3}}\ gives us at least one clear, and potentially interesting message: \emph{stratification does not need to coincide with exponential depth}. However, even if separated from the depth, stratification in \ensuremath{\mathbf{L^3}}\ is still explicitly connected to the exponential modalities.
\paragraph*{Abstracting stratification through denotational semantics}
Denotational semantics originated in the work of \cite{ScottStr} and \cite{Scott} as an attempt to interpret in a non-trivial way the quotient induced on \mbox{$\lambda$-term} s by $\beta$-equivalence. This amounts to finding an invariant of reduction, a question which may be extended to logical systems enjoying cut-elimination. Since its introduction, denotational semantics has proved to be an absolutely essential tool in computer science and proof theory, providing a wealth of information and insights into the nature of computation and formal proofs. A striking example is given by linear logic itself, which arose precisely from a denotational analysis of intuitionistic logic \citep{Girard:LL}.
After the successful introduction of denotational semantics for \ensuremath{\mathbf{LLL}}, \ensuremath{\mathbf{ELL}}\ and related systems \citep{BaillotPedicini01,Baillot04a,LaurentTortora:OCliques,DalLagoLaurent,Laurent:ELLCat}, it seemed natural to attempt to analyze the stratification underlying \ensuremath{\mathbf{L^3}}\ from the denotational point of view. The result of such an analysis forms the contents of the present paper, whose message broadens that of \ensuremath{\mathbf{L^3}}.
\subsection*{Stratified linear logic}
Soon after developing our semantic construction for \ensuremath{\mathbf{L^3}}\ (which we present in \refsect{Objs}), we realized that it suggested a more general syntax than that of \ensuremath{\mathbf{L^3}}\ itself, in which exponential modalities and strata are completely independent. This more general syntax has at least two alternative presentations in terms of sequent calculus, and one in terms of proof nets, all of them shown in \refsect{StratLL}. The induced logical system, which we call \emph{stratified linear logic} (\ensuremath{\mathbf{LL}_\S}), has an additional modality with respect to linear logic, the self-dual \emph{paragraph} $\S$, which is in charge of controlling stratification. In \ensuremath{\mathbf{LL}_\S}, dereliction and digging are provable, $\S A\multimap\S B$ is provable from $A\multimap B$, but $\S A\multimap A$ and $A\multimap\S A$ (or $\S A\multimap \S\S A$) are not provable in general, which is the essence of stratification.
In \ensuremath{\mathbf{LL}_\S}, \ensuremath{\mathbf{L^3}}\ appears as a fragment, in which the exponential modalities are forced to be ``tied'' to paragraph modalities: $\oc A$ is replaced by $\oc\S A$, and $\wn A$ is replaced by $\wn\S A$. Additionally, the paragraph modality itself is a generalization of the paragraph modality of \ensuremath{\mathbf{LLL}}, which justifies our terminology and notation. This makes it possible to define polytime subsystems within \ensuremath{\mathbf{L^3}}\ generalizing \ensuremath{\mathbf{LLL}}, as already shown in \cite{BaillotMazza:LLlev}.
\subsection*{A categorical construction for stratification}
Our denotational analysis brings a new understanding of the exponential modalities of light logics: together with the control of duplication, which is their usual task in linear logic, they are also charged with the additional task of controlling stratification, which is represented by the paragraph modality.
From the semantic point of view, we interpret the paragraph modality in ``augmented'' models of linear logic. More precisely, we define a categorical construction, represented by a 2-endofunctor $\mathop{\mathrm{Inv}}(-)$ of the 2-category of symmetric monoidal categories, which takes a model of linear logic $\mathcal L$ and yields another model of linear logic $\mathop{\mathrm{Inv}}(\mathcal L)$; this latter model is practically equivalent to the original one, but it has ``more space'', and this extra space is what allows the definition of a non-trivial paragraph functor.
Concretely, the $\mathop{\mathrm{Inv}}$ construction may be understood by looking at its action on categories. Given a category $\mathcal A$, we define an \emph{object with involutions} of $\mathcal A$ as a pair $(A,s)$, where $A$ is an object of $\mathcal A$ and $s$ is a $\ensuremath{\mathbb Z}$-indexed sequence of involutions of $A$, \textit{i.e.}\xspace, automorphisms of $A$ such that $s_k\circ s_k=\mathrm{id}_A$ for all $k\in\ensuremath{\mathbb Z}$. Objects with involutions\ have a natural notion of morphism: a morphism from $(A,s)$ to $(B,t)$ is a morphism $f:A\rightarrow B$ such that $t_k\circ f\circ s_k=f$. If the category $\mathcal A$ is symmetric monoidal, the objects with involutions\ of $\mathcal A$ and their morphisms may themselves be arranged in a symmetric monoidal category, which is $\mathop{\mathrm{Inv}}(\mathcal A)$ (so $\mathop{\mathrm{Inv}}$ stands for ``involutions'').
It turns out that $\mathop{\mathrm{Inv}}(\mathcal A)$ has all the structure needed to provide a model of linear logic as soon as $\mathcal A$ does (\refth{ModelPres}). However, in $\mathop{\mathrm{Inv}}(\mathcal A)$ we may now define a functor $\S(-)$ which acts on objects by ``shifting'' the sequence of involutions, \textit{i.e.}\xspace, $\S(A,s)=(A,(s_{k-1})_{k\in\ensuremath{\mathbb Z}})$, and which acts as the identity on morphisms. It is possible to show that such a functor is never trivial, \textit{i.e.}\xspace, it is never isomorphic to the identity functor, unless the original model of linear logic (in the category $\mathcal A$) is itself trivial (\refth{NonDegenerate}).
\subsection*{Applications to bounded complexity}
An important contribution of our work, resulting from an application of the results described above, is the discovery of alternative formulations of \ensuremath{\mathbf{L^3}}.
The first reformulation (\refsect{Geom}) is of ``geometric'' nature. It is a presentation of \ensuremath{\mathbf{L^3}}\ in terms of proof nets by means of a \emph{correctness criterion}, extending the usual one by \cite{DanosRegnier:Mult}. Its advantage is to avoid mentioning the notion of indexing, shifting from an existential condition (there exists a function satisfying\ldots) to a universal one (every cycle satisfies\ldots), which is arguably of interest.
The second reformulation (\refsect{Interactive}) is ``interactive'', and arises when applying the $\mathop{\mathrm{Inv}}$ construction to the syntactic category of formulas and proof nets. Approximately speaking, we prove that a cut-free linear logic proof net $\pi$ is in \ensuremath{\mathbf{L^3}}\ iff it ``interacts well'' with all ``tests'' (\refth{Interactive}), where a test is a certain kind of proof net which may interact with $\pi$ by means of a cut rule (the interaction being cut-elimination).
Finally, we provide a semantic characterization of \ensuremath{\mathbf{L^3}}\ (\refsect{Completeness}), which may be roughly formulated as follows: given a model of linear logic $\mathcal L$ which satisfies a certain condition we call \emph{swap-sensitivity}, we have that a cut-free linear logic proof net $\pi$ of conclusion $A$ is in \ensuremath{\mathbf{L^3}}\ iff its denotational interpretation $\sem\pi$ in $\mathcal L$ is a morphism of $\mathop{\mathrm{Inv}}(\mathcal L)$ from the tensor unit to a certain object with involutions\ $\altsem{A}$, which depends solely on the formula $A$ (\refth{Sem}). In other words, provided swap-sensitivity holds, the semantics is able to ``detect'' whether a proof net fails to admit a valid indexing, or fails to satisfy either of the two equivalent conditions mentioned above. The swap-sensitivity condition is a bit technical, but it is very mild: it is satisfied by all models of linear logic we are aware of.
\subsection*{Acknowledgments}
We would like to thank Paul-Andr\'e Melli\`es for several interesting discussions on the categorical constructions used in this paper.
This work was partially supported by ANR projects \textsc{Complice} (08-BLAN-0211-01) and \textsc{Logoi} (10-BLAN-0213-02), and by the CNRS PICS ``Logique Lin\'eaire et Applications''.
\section*{Introduction}
\input{Intro.tex}
\section{Stratified Linear Logic}
\label{sect:StratLL}
\subsection{The logical system}
\input{StratLL.tex}
\subsection{Bounded time subsystems}
\label{sect:Bounded}
\input{Bounded.tex}
\subsection{Proof nets}
\label{sect:ProofNets}
\input{ProofNets.tex}
\subsection{Cut-elimination}
\label{sect:CutElim}
\input{CutElim.tex}
\section{A Categorical Construction for Stratification}
\subsection{Categorical models of linear logic}
\label{sect:Models}
\input{Models.tex}
\subsection{Categorical models of stratified linear logic}
\label{sect:StratModels}
\input{StratModels.tex}
\subsection{Objects with involutions}\label{sect:Objs}
\input{Obj.tex}
\subsection{Building stratified models}
\input{BuildStratMod.tex}
\section{Applications to Bounded Complexity}
\subsection{A geometric definition of \ensuremath{\mathbf{L^3}}}
\label{sect:Geom}
\input{L3.tex}
\subsection{Denotational semantics of \ensuremath{\mathbf{L^3}}}
\input{L3Sem.tex}
\subsection{Interactive characterization of \ensuremath{\mathbf{L^3}}}
\label{sect:Interactive}
\input{Interactive.tex}
\subsection{Semantic characterization of \ensuremath{\mathbf{L^3}}}
\label{sect:Completeness}
\input{RelComp.tex}
\bibliographystyle{elsarticle-harv}
|
{
"timestamp": "2013-02-15T02:03:13",
"yymm": "1206",
"arxiv_id": "1206.6504",
"language": "en",
"url": "https://arxiv.org/abs/1206.6504"
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.